/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
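
/* FS_CAP() resolves a field inside the flow_table_nic_cap layout to its bit
 * offset, and FS_REQUIRED_CAPS() packs a list of such offsets into a
 * node_caps array.  A priority in root_fs below is only instantiated when
 * has_required_caps() finds every listed capability bit set on the device.
 */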

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 5
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 1
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
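
/* With the values above the minimum levels stack up as follows (taking
 * MLX5_BY_PASS_NUM_PRIOS from linux/mlx5/fs.h; that constant lives outside
 * this file):
 *	KERNEL_MIN_LEVEL  = 5 + 1  = 6
 *	ETHTOOL_MIN_LEVEL = 6 + 11 = 17
 *	BY_PASS_MIN_LEVEL = 17 + MLX5_BY_PASS_NUM_PRIOS + LEFTOVERS_NUM_PRIOS
 * so each namespace starts past every level that the namespaces below it
 * may legally use.
 */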

struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
	}
};

enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = FIELD_SIZEOF(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case: the FTE must stay
 * locked for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}
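
/* Locking scheme used from here on: each fs_node carries an rw_semaphore
 * and a refcount.  The nested_down_*_ref_node() helpers take the semaphore
 * with a lockdep class (FS_LOCK_GRANDPARENT/PARENT/CHILD) matching the
 * node's depth, so a flow table, one of its groups and an FTE can be held
 * together without false lockdep reports; every lock helper also bumps the
 * refcount so the node cannot be freed while it is held.
 */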

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node)
{
	if (node) {
		down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_write(&node->lock);
}

static void tree_put_node(struct fs_node *node)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			/* Only the root namespace has no parent, and for it we
			 * just need to free the node itself.
			 */
			down_write_ref_node(parent_node);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node);
		} else {
			kfree(node);
		}
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node);
}

static int tree_remove_node(struct fs_node *node)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node);
	return 0;
}
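
/* tree_remove_node() is the "destroy if last user" primitive: when other
 * references remain it only drops one and reports -EEXIST; otherwise
 * tree_put_node() runs the del_hw_func/del_sw_func callbacks installed by
 * tree_init_node() and releases the parent reference taken by
 * tree_add_node().
 */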

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool check_last_reserved(const u32 *match_criteria)
{
	char *match_criteria_reserved =
		MLX5_ADDR_OF(fte_match_param, match_criteria, MLX5_FTE_MATCH_PARAM_RESERVED);

	return	!match_criteria_reserved[0] &&
		!memcmp(match_criteria_reserved, match_criteria_reserved + 1,
			MLX5_FLD_SZ_BYTES(fte_match_param,
					  MLX5_FTE_MATCH_PARAM_RESERVED) - 1);
}

static bool check_valid_mask(u8 match_criteria_enable, const u32 *match_criteria)
{
	if (match_criteria_enable & ~(
		(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS)   |
		(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) |
		(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS)))
		return false;

	if (!(match_criteria_enable &
	      1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS)) {
		char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
						  match_criteria, outer_headers);

		if (fg_type_mask[0] ||
		    memcmp(fg_type_mask, fg_type_mask + 1,
			   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4) - 1))
			return false;
	}

	if (!(match_criteria_enable &
	      1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS)) {
		char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
						  match_criteria, misc_parameters);

		if (fg_type_mask[0] ||
		    memcmp(fg_type_mask, fg_type_mask + 1,
			   MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
			return false;
	}

	if (!(match_criteria_enable &
	      1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS)) {
		char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
						  match_criteria, inner_headers);

		if (fg_type_mask[0] ||
		    memcmp(fg_type_mask, fg_type_mask + 1,
			   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4) - 1))
			return false;
	}

	return check_last_reserved(match_criteria);
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	if (!check_valid_mask(spec->match_criteria_enable, spec->match_criteria)) {
		pr_warn("mlx5_core: Match criteria given mismatches match_criteria_enable\n");
		return false;
	}

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return check_last_reserved(spec->match_value);
}
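
/* Example of the invariant enforced above: a spec matching TCP dport 80
 * must set the dport bits in both match_criteria (all-ones mask) and
 * match_value (0x0050); any value bit outside the criteria mask, or a
 * non-zero byte in the reserved tail, makes the spec invalid.
 */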

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);

	if (node->active) {
		err = mlx5_cmd_destroy_flow_table(dev, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
	kfree(ft);
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask;
	struct mlx5_core_dev *dev = get_dev(node);
	int err;
	bool update_fte = false;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER  &&
	    --fte->dests_size) {
		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
		fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		update_fte = true;
		goto out;
	}

	if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
		update_fte = true;
	}
out:
	if (update_fte && fte->dests_size) {
		err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
		if (err)
			mlx5_core_warn(dev,
				       "%s can't del rule fg id=%d fte_index=%d\n",
				       __func__, fg->id, fte->index);
	}
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	if (node->active) {
		err = mlx5_cmd_delete_fte(dev, ft,
					  fte->index);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}
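
/* The FTE index allocated above is group-relative: ida_simple_get() hands
 * out [0, fg->max_ftes) and fte->index re-bases it by fg->start_index, so
 * indexes of different groups never collide inside one flow table.
 */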

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				u32 *match_value,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->flow_tag = flow_act->flow_tag;
	fte->action = flow_act->action;
	fte->encap_id = flow_act->encap_id;
	fte->modify_id = flow_act->modify_id;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}
	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start (closest from right); else we search for the
 * last flow table in the root sub-tree till start (closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in the next priority
 * of prio in the tree, else return the last flow table in the previous
 * priority of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}
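
/* Together these two helpers implement table chaining: tables are ordered
 * by level across priorities, and a table's miss traffic falls through to
 * the closest table of the next priority — which is exactly the next-table
 * pointer that connect_fts_in_prio() programs below via
 * mlx5_cmd_modify_flow_table().
 */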

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = mlx5_cmd_modify_flow_table(dev,
						 iter,
						 ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = mlx5_cmd_update_root_ft(root->dev, ft, qpn,
						      false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	err = mlx5_cmd_update_fte(get_dev(&ft->node),
				  ft, fg->id, modify_mask, fte);
	up_write_ref_node(&fte->node);

	return err;
}

*handle
,
901 struct mlx5_flow_destination
*new_dest
,
902 struct mlx5_flow_destination
*old_dest
)
907 if (handle
->num_rules
!= 1)
909 return _mlx5_modify_rule_destination(handle
->rule
[0],
913 for (i
= 0; i
< handle
->num_rules
; i
++) {
914 if (mlx5_flow_dests_cmp(new_dest
, &handle
->rule
[i
]->dest_attr
))
915 return _mlx5_modify_rule_destination(handle
->rule
[i
],
/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The level is related to the
	 * priority level range.
	 */
	ft_attr->level += fs_prio->start_level;
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = find_next_chained_ft(fs_prio);
	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
					 ft->level, log_table_sz, next_ft, &ft->id,
					 ft->flags);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	ft->node.active = true;
	down_write_ref_node(&fs_prio->node);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node);
	mutex_unlock(&root->chain_lock);
	return ft;
destroy_ft:
	mlx5_cmd_destroy_flow_table(root->dev, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level   = level;
	ft_attr.prio    = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    int prio,
				    int num_flow_table_entries,
				    int max_num_groups,
				    u32 level,
				    u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	if (max_num_groups > num_flow_table_entries)
		return ERR_PTR(-EINVAL);

	ft_attr.max_fte = num_flow_table_entries;
	ft_attr.prio    = prio;
	ft_attr.level   = level;
	ft_attr.flags   = flags;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
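
/* A minimal usage sketch (illustrative only; "my_ns" and the sizes are
 * hypothetical, error handling elided):
 *
 *	struct mlx5_flow_table *ft;
 *
 *	ft = mlx5_create_auto_grouped_flow_table(my_ns, 0, 1024, 16, 0, 0);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *
 * Rules added with mlx5_add_flow_rules() then carve flow groups out of the
 * table on demand instead of requiring explicit mlx5_create_flow_group()
 * calls.
 */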

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_group *fg;
	int err;

	if (!check_valid_mask(match_criteria_enable, match_criteria))
		return ERR_PTR(-EINVAL);

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node);
	if (IS_ERR(fg))
		return fg;

	err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
	if (err) {
		tree_put_node(&fg->node);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

*alloc_handle(int num_rules
)
1181 struct mlx5_flow_handle
*handle
;
1183 handle
= kzalloc(sizeof(*handle
) + sizeof(handle
->rule
[0]) *
1184 num_rules
, GFP_KERNEL
);
1188 handle
->num_rules
= num_rules
;
static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list - we need flow tables to be at the
		 * end of the list for forward-to-next-prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = mlx5_cmd_create_fte(get_dev(&ft->node),
					  ft, fg->id, fte);
	else
		err = mlx5_cmd_update_fte(get_dev(&ft->node),
					  ft, fg->id, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	struct mlx5_flow_group *fg;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		/* We save place for flow groups in addition to max types */
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	/*  ft->max_fte == ft->autogroup.max_types  */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	ft->autogroup.num_groups++;

out:
	return fg;
}
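
/* Sizing example for the math above: with max_fte = 1024 and
 * required_groups = 15, group_size = 1024 / (15 + 1) = 64, so each
 * autogroup claims 64 entries and one group's worth of space stays in
 * reserve for match masks that show up later.
 */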

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
		 fg->max_ftes - 1);
	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport_num == d2->vport_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
			     MLX5_FLOW_CONTEXT_ACTION_ENCAP |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if (fte->flow_tag != flow_act->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_tag,
			       flow_act->flow_tag);
		return -EEXIST;
	}

	return 0;
}

*add_rule_fg(struct mlx5_flow_group
*fg
,
1456 struct mlx5_flow_act
*flow_act
,
1457 struct mlx5_flow_destination
*dest
,
1461 struct mlx5_flow_handle
*handle
;
1466 ret
= check_conflicting_ftes(fte
, flow_act
);
1468 return ERR_PTR(ret
);
1470 old_action
= fte
->action
;
1471 fte
->action
|= flow_act
->action
;
1472 handle
= add_rule_fte(fte
, fg
, dest
, dest_num
,
1473 old_action
!= flow_act
->action
);
1474 if (IS_ERR(handle
)) {
1475 fte
->action
= old_action
;
1478 trace_mlx5_fs_set_fte(fte
, false);
1480 for (i
= 0; i
< handle
->num_rules
; i
++) {
1481 if (refcount_read(&handle
->rule
[i
]->node
.refcount
) == 1) {
1482 tree_add_node(&handle
->rule
[i
]->node
, &fte
->node
);
1483 trace_mlx5_fs_add_rule(handle
->rule
[i
]);
struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
{
	struct mlx5_flow_rule *dst;
	struct fs_fte *fte;

	fs_get_obj(fte, handle->rule[0]->node.parent);

	fs_for_each_dst(dst, fte) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			return dst->dest_attr.counter;
	}

	return NULL;
}

*counter
, u32 action
)
1506 if (!(action
& MLX5_FLOW_CONTEXT_ACTION_COUNT
))
1512 return (action
& (MLX5_FLOW_CONTEXT_ACTION_DROP
|
1513 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
));
static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  u32 action,
			  struct mlx5_flow_table *ft)
{
	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(dest->counter, action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

struct match_list_head {
	struct list_head  list;
	struct match_list first;
};

static void free_match_list(struct match_list_head *head)
{
	if (!list_empty(&head->list)) {
		struct match_list *iter, *match_tmp;

		list_del(&head->first.list);
		tree_put_node(&head->first.g->node);
		list_for_each_entry_safe(iter, match_tmp, &head->list,
					 list) {
			tree_put_node(&iter->g->node);
			list_del(&iter->list);
			kfree(iter);
		}
	}
}

static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    struct mlx5_flow_spec *spec)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}
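
/* The per-node version counters summed here are the optimistic-locking
 * token for rule insertion: try_add_to_existing_fg() samples the sum under
 * read locks, escalates to write locks, then re-samples; any mismatch
 * means a concurrent writer changed a matching group and the search must
 * run again.
 */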

static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64  version;
	int err;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

	list_for_each_entry(iter, match_head, list) {
		nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
		ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL);
	}

search_again_locked:
	version = matched_fgs_get_version(match_head);
	/* Try to find a fg that already contains a matching fte */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
						 rhash_fte);
		if (!fte_tmp || !tree_get_node(&fte_tmp->node))
			continue;

		nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
		if (!take_write) {
			list_for_each_entry(iter, match_head, list)
				up_read_ref_node(&iter->g->node);
		} else {
			list_for_each_entry(iter, match_head, list)
				up_write_ref_node(&iter->g->node);
		}

		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node);
		tree_put_node(&fte_tmp->node);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

	/* No group with matching fte found. Try to add a new fte to any
	 * matching fg.
	 */

	if (!take_write) {
		list_for_each_entry(iter, match_head, list)
			up_read_ref_node(&iter->g->node);
		list_for_each_entry(iter, match_head, list)
			nested_down_write_ref_node(&iter->g->node,
						   FS_LOCK_PARENT);
		take_write = true;
	}

	/* Check the ft version, for case that new flow group
	 * was added while the fgs weren't locked
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version, for case the new FTE with the
	 * same values was added while the fgs weren't locked
	 */
	if (version != matched_fgs_get_version(match_head))
		goto search_again_locked;

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		if (!g->node.active)
			continue;
		err = insert_fte(g, fte);
		if (err) {
			if (err == -ENOSPC)
				continue;
			list_for_each_entry(iter, match_head, list)
				up_write_ref_node(&iter->g->node);
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		list_for_each_entry(iter, match_head, list)
			up_write_ref_node(&iter->g->node);
		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node);
		tree_put_node(&fte->node);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	list_for_each_entry(iter, match_head, list)
		up_write_ref_node(&iter->g->node);
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)

{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = (void *)g;
		up_write_ref_node(&ft->node);
		return rule;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		goto err_release_fg;
	}

	err = insert_fte(g, fte);
	if (err) {
		kmem_cache_free(steering->ftes_cache, fte);
		goto err_release_fg;
	}

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node);
	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
			   dest_num, fte);
	up_write_ref_node(&fte->node);
	tree_put_node(&fte->node);
	tree_put_node(&g->node);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node);
	tree_put_node(&g->node);
	return ERR_PTR(err);
}
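
/* _mlx5_add_flow_rules() escalates locks lazily: it first searches the
 * matching groups under a read lock on the table, retries under the table
 * write lock only when the fast path returns -ENOENT/-EAGAIN, and only
 * then allocates a new autogroup and FTE while holding that write lock.
 */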

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int dest_num)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest = {};
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		if (dest)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			dest_num = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
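
/* A minimal caller sketch (illustrative only; "ns", "ft" and "tirn" are
 * hypothetical, error handling elided):
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_spec *spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	struct mlx5_flow_handle *rule;
 *
 *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *
 * An all-zero spec matches everything within the group's criteria; the
 * returned handle is released with mlx5_del_flow_rules().
 */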

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	int i;

	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node);
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children (flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);

	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn,
					      false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
						      qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if  (!(list_first_entry(&prio->node.children,
				struct mlx5_flow_table,
				node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
	case MLX5_FLOW_NAMESPACE_LAG:
	case MLX5_FLOW_NAMESPACE_OFFLOADS:
	case MLX5_FLOW_NAMESPACE_ETHTOOL:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
	case MLX5_FLOW_NAMESPACE_ANCHOR:
		prio = type;
		break;
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (steering->esw_egress_root_ns)
			return &steering->esw_egress_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (steering->esw_ingress_root_ns)
			return &steering->esw_ingress_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		else
			return NULL;
	default:
		return NULL;
	}

	root_ns = steering->root_ns;
	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = FS_TYPE_PRIO;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
{
	struct mlx5_flow_namespace	*ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
						       enum fs_flow_table_type
						       table_type)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	/* Create the root namespace */
	root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, NULL);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		 /* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}
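
/* Example of the accumulation above: a root with two priorities of 2 and 3
 * levels gets start_level 0 and 2 respectively, so every table level in
 * the second priority is strictly greater than any level in the first —
 * the ordering the chaining code relies on.
 */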

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level   = ANCHOR_LEVEL;
	ft_attr.prio    = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		goto cleanup;

	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
		goto cleanup;

	set_prio_attrs(steering->root_ns);

	if (create_anchor_flow_table(steering))
		goto cleanup;

	return 0;

cleanup:
	mlx5_cleanup_fs(steering->dev);
	return -ENOMEM;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node);
		tree_remove_node(node);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->esw_egress_root_ns);
	cleanup_root_ns(steering->esw_ingress_root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_tx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_rx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
	if (IS_ERR(prio))
		goto out_err;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
	if (IS_ERR(prio))
		goto out_err;

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	return PTR_ERR(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
			      MLX5_TOTAL_VPORTS(steering->dev));
	return PTR_ERR_OR_ZERO(prio);
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
			      MLX5_TOTAL_VPORTS(steering->dev));
	return PTR_ERR_OR_ZERO(prio);
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acl_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acl_root_ns(steering);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);