2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
38 #include "mlx5_core.h"
41 #include "diag/fs_tracepoint.h"
42 #include "accel/ipsec.h"
43 #include "fpga/ipsec.h"
46 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
47 sizeof(struct init_tree_node))
49 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
50 ...) {.type = FS_TYPE_PRIO,\
51 .min_ft_level = min_level_val,\
52 .num_levels = num_levels_val,\
53 .num_leaf_prios = num_prios_val,\
55 .children = (struct init_tree_node[]) {__VA_ARGS__},\
56 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
59 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
60 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
63 #define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
64 .children = (struct init_tree_node[]) {__VA_ARGS__},\
65 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
68 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
71 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
73 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
74 .caps = (long[]) {__VA_ARGS__} }
76 #define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
77 FS_CAP(flow_table_properties_nic_receive.modify_root), \
78 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
79 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
81 #define FS_CHAINING_CAPS_EGRESS \
83 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
84 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
85 FS_CAP(flow_table_properties_nic_transmit \
86 .identified_miss_table_mode), \
87 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
89 #define LEFTOVERS_NUM_LEVELS 1
90 #define LEFTOVERS_NUM_PRIOS 1
92 #define BY_PASS_PRIO_NUM_LEVELS 1
93 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
96 #define ETHTOOL_PRIO_NUM_LEVELS 1
97 #define ETHTOOL_NUM_PRIOS 11
98 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
99 /* Vlan, mac, ttc, inner ttc, aRFS */
100 #define KERNEL_NIC_PRIO_NUM_LEVELS 5
101 #define KERNEL_NIC_NUM_PRIOS 1
102 /* One more level for tc */
103 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
105 #define KERNEL_NIC_TC_NUM_PRIOS 1
106 #define KERNEL_NIC_TC_NUM_LEVELS 2
108 #define ANCHOR_NUM_LEVELS 1
109 #define ANCHOR_NUM_PRIOS 1
110 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
112 #define OFFLOADS_MAX_FT 1
113 #define OFFLOADS_NUM_PRIOS 1
114 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
116 #define LAG_PRIO_NUM_LEVELS 1
117 #define LAG_NUM_PRIOS 1
118 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
125 static struct init_tree_node
{
126 enum fs_node_type type
;
127 struct init_tree_node
*children
;
129 struct node_caps caps
;
135 .type
= FS_TYPE_NAMESPACE
,
137 .children
= (struct init_tree_node
[]) {
138 ADD_PRIO(0, BY_PASS_MIN_LEVEL
, 0,
140 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS
,
141 BY_PASS_PRIO_NUM_LEVELS
))),
142 ADD_PRIO(0, LAG_MIN_LEVEL
, 0,
144 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS
,
145 LAG_PRIO_NUM_LEVELS
))),
146 ADD_PRIO(0, OFFLOADS_MIN_LEVEL
, 0, {},
147 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS
, OFFLOADS_MAX_FT
))),
148 ADD_PRIO(0, ETHTOOL_MIN_LEVEL
, 0,
150 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS
,
151 ETHTOOL_PRIO_NUM_LEVELS
))),
152 ADD_PRIO(0, KERNEL_MIN_LEVEL
, 0, {},
153 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS
, KERNEL_NIC_TC_NUM_LEVELS
),
154 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS
,
155 KERNEL_NIC_PRIO_NUM_LEVELS
))),
156 ADD_PRIO(0, BY_PASS_MIN_LEVEL
, 0,
158 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS
, LEFTOVERS_NUM_LEVELS
))),
159 ADD_PRIO(0, ANCHOR_MIN_LEVEL
, 0, {},
160 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS
, ANCHOR_NUM_LEVELS
))),
164 static struct init_tree_node egress_root_fs
= {
165 .type
= FS_TYPE_NAMESPACE
,
167 .children
= (struct init_tree_node
[]) {
168 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS
, 0,
169 FS_CHAINING_CAPS_EGRESS
,
170 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS
,
171 BY_PASS_PRIO_NUM_LEVELS
))),
175 enum fs_i_lock_class
{
181 static const struct rhashtable_params rhash_fte
= {
182 .key_len
= FIELD_SIZEOF(struct fs_fte
, val
),
183 .key_offset
= offsetof(struct fs_fte
, val
),
184 .head_offset
= offsetof(struct fs_fte
, hash
),
185 .automatic_shrinking
= true,
189 static const struct rhashtable_params rhash_fg
= {
190 .key_len
= FIELD_SIZEOF(struct mlx5_flow_group
, mask
),
191 .key_offset
= offsetof(struct mlx5_flow_group
, mask
),
192 .head_offset
= offsetof(struct mlx5_flow_group
, hash
),
193 .automatic_shrinking
= true,
198 static void del_hw_flow_table(struct fs_node
*node
);
199 static void del_hw_flow_group(struct fs_node
*node
);
200 static void del_hw_fte(struct fs_node
*node
);
201 static void del_sw_flow_table(struct fs_node
*node
);
202 static void del_sw_flow_group(struct fs_node
*node
);
203 static void del_sw_fte(struct fs_node
*node
);
204 static void del_sw_prio(struct fs_node
*node
);
205 static void del_sw_ns(struct fs_node
*node
);
206 /* Delete rule (destination) is special case that
207 * requires to lock the FTE for all the deletion process.
209 static void del_sw_hw_rule(struct fs_node
*node
);
210 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination
*d1
,
211 struct mlx5_flow_destination
*d2
);
212 static void cleanup_root_ns(struct mlx5_flow_root_namespace
*root_ns
);
213 static struct mlx5_flow_rule
*
214 find_flow_rule(struct fs_fte
*fte
,
215 struct mlx5_flow_destination
*dest
);
217 static void tree_init_node(struct fs_node
*node
,
218 void (*del_hw_func
)(struct fs_node
*),
219 void (*del_sw_func
)(struct fs_node
*))
221 refcount_set(&node
->refcount
, 1);
222 INIT_LIST_HEAD(&node
->list
);
223 INIT_LIST_HEAD(&node
->children
);
224 init_rwsem(&node
->lock
);
225 node
->del_hw_func
= del_hw_func
;
226 node
->del_sw_func
= del_sw_func
;
227 node
->active
= false;
230 static void tree_add_node(struct fs_node
*node
, struct fs_node
*parent
)
233 refcount_inc(&parent
->refcount
);
234 node
->parent
= parent
;
236 /* Parent is the root */
240 node
->root
= parent
->root
;
243 static int tree_get_node(struct fs_node
*node
)
245 return refcount_inc_not_zero(&node
->refcount
);
248 static void nested_down_read_ref_node(struct fs_node
*node
,
249 enum fs_i_lock_class
class)
252 down_read_nested(&node
->lock
, class);
253 refcount_inc(&node
->refcount
);
257 static void nested_down_write_ref_node(struct fs_node
*node
,
258 enum fs_i_lock_class
class)
261 down_write_nested(&node
->lock
, class);
262 refcount_inc(&node
->refcount
);
266 static void down_write_ref_node(struct fs_node
*node
, bool locked
)
270 down_write(&node
->lock
);
271 refcount_inc(&node
->refcount
);
275 static void up_read_ref_node(struct fs_node
*node
)
277 refcount_dec(&node
->refcount
);
278 up_read(&node
->lock
);
281 static void up_write_ref_node(struct fs_node
*node
, bool locked
)
283 refcount_dec(&node
->refcount
);
285 up_write(&node
->lock
);
288 static void tree_put_node(struct fs_node
*node
, bool locked
)
290 struct fs_node
*parent_node
= node
->parent
;
292 if (refcount_dec_and_test(&node
->refcount
)) {
293 if (node
->del_hw_func
)
294 node
->del_hw_func(node
);
296 /* Only root namespace doesn't have parent and we just
297 * need to free its node.
299 down_write_ref_node(parent_node
, locked
);
300 list_del_init(&node
->list
);
301 if (node
->del_sw_func
)
302 node
->del_sw_func(node
);
303 up_write_ref_node(parent_node
, locked
);
309 if (!node
&& parent_node
)
310 tree_put_node(parent_node
, locked
);
313 static int tree_remove_node(struct fs_node
*node
, bool locked
)
315 if (refcount_read(&node
->refcount
) > 1) {
316 refcount_dec(&node
->refcount
);
319 tree_put_node(node
, locked
);
323 static struct fs_prio
*find_prio(struct mlx5_flow_namespace
*ns
,
326 struct fs_prio
*iter_prio
;
328 fs_for_each_prio(iter_prio
, ns
) {
329 if (iter_prio
->prio
== prio
)
336 static bool check_valid_spec(const struct mlx5_flow_spec
*spec
)
340 for (i
= 0; i
< MLX5_ST_SZ_DW_MATCH_PARAM
; i
++)
341 if (spec
->match_value
[i
] & ~spec
->match_criteria
[i
]) {
342 pr_warn("mlx5_core: match_value differs from match_criteria\n");
349 static struct mlx5_flow_root_namespace
*find_root(struct fs_node
*node
)
351 struct fs_node
*root
;
352 struct mlx5_flow_namespace
*ns
;
356 if (WARN_ON(root
->type
!= FS_TYPE_NAMESPACE
)) {
357 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
361 ns
= container_of(root
, struct mlx5_flow_namespace
, node
);
362 return container_of(ns
, struct mlx5_flow_root_namespace
, ns
);
365 static inline struct mlx5_flow_steering
*get_steering(struct fs_node
*node
)
367 struct mlx5_flow_root_namespace
*root
= find_root(node
);
370 return root
->dev
->priv
.steering
;
374 static inline struct mlx5_core_dev
*get_dev(struct fs_node
*node
)
376 struct mlx5_flow_root_namespace
*root
= find_root(node
);
383 static void del_sw_ns(struct fs_node
*node
)
388 static void del_sw_prio(struct fs_node
*node
)
393 static void del_hw_flow_table(struct fs_node
*node
)
395 struct mlx5_flow_root_namespace
*root
;
396 struct mlx5_flow_table
*ft
;
397 struct mlx5_core_dev
*dev
;
400 fs_get_obj(ft
, node
);
401 dev
= get_dev(&ft
->node
);
402 root
= find_root(&ft
->node
);
403 trace_mlx5_fs_del_ft(ft
);
406 err
= root
->cmds
->destroy_flow_table(dev
, ft
);
408 mlx5_core_warn(dev
, "flow steering can't destroy ft\n");
412 static void del_sw_flow_table(struct fs_node
*node
)
414 struct mlx5_flow_table
*ft
;
415 struct fs_prio
*prio
;
417 fs_get_obj(ft
, node
);
419 rhltable_destroy(&ft
->fgs_hash
);
420 fs_get_obj(prio
, ft
->node
.parent
);
425 static void modify_fte(struct fs_fte
*fte
)
427 struct mlx5_flow_root_namespace
*root
;
428 struct mlx5_flow_table
*ft
;
429 struct mlx5_flow_group
*fg
;
430 struct mlx5_core_dev
*dev
;
433 fs_get_obj(fg
, fte
->node
.parent
);
434 fs_get_obj(ft
, fg
->node
.parent
);
435 dev
= get_dev(&fte
->node
);
437 root
= find_root(&ft
->node
);
438 err
= root
->cmds
->update_fte(dev
, ft
, fg
->id
, fte
->modify_mask
, fte
);
441 "%s can't del rule fg id=%d fte_index=%d\n",
442 __func__
, fg
->id
, fte
->index
);
443 fte
->modify_mask
= 0;
446 static void del_sw_hw_rule(struct fs_node
*node
)
448 struct mlx5_flow_rule
*rule
;
451 fs_get_obj(rule
, node
);
452 fs_get_obj(fte
, rule
->node
.parent
);
453 trace_mlx5_fs_del_rule(rule
);
454 if (rule
->sw_action
== MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO
) {
455 mutex_lock(&rule
->dest_attr
.ft
->lock
);
456 list_del(&rule
->next_ft
);
457 mutex_unlock(&rule
->dest_attr
.ft
->lock
);
460 if (rule
->dest_attr
.type
== MLX5_FLOW_DESTINATION_TYPE_COUNTER
&&
463 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION
) |
464 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS
);
465 fte
->action
.action
&= ~MLX5_FLOW_CONTEXT_ACTION_COUNT
;
469 if ((fte
->action
.action
& MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
) &&
472 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST
);
478 static void del_hw_fte(struct fs_node
*node
)
480 struct mlx5_flow_root_namespace
*root
;
481 struct mlx5_flow_table
*ft
;
482 struct mlx5_flow_group
*fg
;
483 struct mlx5_core_dev
*dev
;
487 fs_get_obj(fte
, node
);
488 fs_get_obj(fg
, fte
->node
.parent
);
489 fs_get_obj(ft
, fg
->node
.parent
);
491 trace_mlx5_fs_del_fte(fte
);
492 dev
= get_dev(&ft
->node
);
493 root
= find_root(&ft
->node
);
495 err
= root
->cmds
->delete_fte(dev
, ft
, fte
);
498 "flow steering can't delete fte in index %d of flow group id %d\n",
504 static void del_sw_fte(struct fs_node
*node
)
506 struct mlx5_flow_steering
*steering
= get_steering(node
);
507 struct mlx5_flow_group
*fg
;
511 fs_get_obj(fte
, node
);
512 fs_get_obj(fg
, fte
->node
.parent
);
514 err
= rhashtable_remove_fast(&fg
->ftes_hash
,
518 ida_simple_remove(&fg
->fte_allocator
, fte
->index
- fg
->start_index
);
519 kmem_cache_free(steering
->ftes_cache
, fte
);
522 static void del_hw_flow_group(struct fs_node
*node
)
524 struct mlx5_flow_root_namespace
*root
;
525 struct mlx5_flow_group
*fg
;
526 struct mlx5_flow_table
*ft
;
527 struct mlx5_core_dev
*dev
;
529 fs_get_obj(fg
, node
);
530 fs_get_obj(ft
, fg
->node
.parent
);
531 dev
= get_dev(&ft
->node
);
532 trace_mlx5_fs_del_fg(fg
);
534 root
= find_root(&ft
->node
);
535 if (fg
->node
.active
&& root
->cmds
->destroy_flow_group(dev
, ft
, fg
->id
))
536 mlx5_core_warn(dev
, "flow steering can't destroy fg %d of ft %d\n",
540 static void del_sw_flow_group(struct fs_node
*node
)
542 struct mlx5_flow_steering
*steering
= get_steering(node
);
543 struct mlx5_flow_group
*fg
;
544 struct mlx5_flow_table
*ft
;
547 fs_get_obj(fg
, node
);
548 fs_get_obj(ft
, fg
->node
.parent
);
550 rhashtable_destroy(&fg
->ftes_hash
);
551 ida_destroy(&fg
->fte_allocator
);
552 if (ft
->autogroup
.active
)
553 ft
->autogroup
.num_groups
--;
554 err
= rhltable_remove(&ft
->fgs_hash
,
558 kmem_cache_free(steering
->fgs_cache
, fg
);
561 static int insert_fte(struct mlx5_flow_group
*fg
, struct fs_fte
*fte
)
566 index
= ida_simple_get(&fg
->fte_allocator
, 0, fg
->max_ftes
, GFP_KERNEL
);
570 fte
->index
= index
+ fg
->start_index
;
571 ret
= rhashtable_insert_fast(&fg
->ftes_hash
,
577 tree_add_node(&fte
->node
, &fg
->node
);
578 list_add_tail(&fte
->node
.list
, &fg
->node
.children
);
582 ida_simple_remove(&fg
->fte_allocator
, index
);
586 static struct fs_fte
*alloc_fte(struct mlx5_flow_table
*ft
,
588 struct mlx5_flow_act
*flow_act
)
590 struct mlx5_flow_steering
*steering
= get_steering(&ft
->node
);
593 fte
= kmem_cache_zalloc(steering
->ftes_cache
, GFP_KERNEL
);
595 return ERR_PTR(-ENOMEM
);
597 memcpy(fte
->val
, match_value
, sizeof(fte
->val
));
598 fte
->node
.type
= FS_TYPE_FLOW_ENTRY
;
599 fte
->action
= *flow_act
;
601 tree_init_node(&fte
->node
, NULL
, del_sw_fte
);
606 static void dealloc_flow_group(struct mlx5_flow_steering
*steering
,
607 struct mlx5_flow_group
*fg
)
609 rhashtable_destroy(&fg
->ftes_hash
);
610 kmem_cache_free(steering
->fgs_cache
, fg
);
613 static struct mlx5_flow_group
*alloc_flow_group(struct mlx5_flow_steering
*steering
,
614 u8 match_criteria_enable
,
615 void *match_criteria
,
619 struct mlx5_flow_group
*fg
;
622 fg
= kmem_cache_zalloc(steering
->fgs_cache
, GFP_KERNEL
);
624 return ERR_PTR(-ENOMEM
);
626 ret
= rhashtable_init(&fg
->ftes_hash
, &rhash_fte
);
628 kmem_cache_free(steering
->fgs_cache
, fg
);
632 ida_init(&fg
->fte_allocator
);
633 fg
->mask
.match_criteria_enable
= match_criteria_enable
;
634 memcpy(&fg
->mask
.match_criteria
, match_criteria
,
635 sizeof(fg
->mask
.match_criteria
));
636 fg
->node
.type
= FS_TYPE_FLOW_GROUP
;
637 fg
->start_index
= start_index
;
638 fg
->max_ftes
= end_index
- start_index
+ 1;
643 static struct mlx5_flow_group
*alloc_insert_flow_group(struct mlx5_flow_table
*ft
,
644 u8 match_criteria_enable
,
645 void *match_criteria
,
648 struct list_head
*prev
)
650 struct mlx5_flow_steering
*steering
= get_steering(&ft
->node
);
651 struct mlx5_flow_group
*fg
;
654 fg
= alloc_flow_group(steering
, match_criteria_enable
, match_criteria
,
655 start_index
, end_index
);
659 /* initialize refcnt, add to parent list */
660 ret
= rhltable_insert(&ft
->fgs_hash
,
664 dealloc_flow_group(steering
, fg
);
668 tree_init_node(&fg
->node
, del_hw_flow_group
, del_sw_flow_group
);
669 tree_add_node(&fg
->node
, &ft
->node
);
670 /* Add node to group list */
671 list_add(&fg
->node
.list
, prev
);
672 atomic_inc(&ft
->node
.version
);
677 static struct mlx5_flow_table
*alloc_flow_table(int level
, u16 vport
, int max_fte
,
678 enum fs_flow_table_type table_type
,
679 enum fs_flow_table_op_mod op_mod
,
682 struct mlx5_flow_table
*ft
;
685 ft
= kzalloc(sizeof(*ft
), GFP_KERNEL
);
687 return ERR_PTR(-ENOMEM
);
689 ret
= rhltable_init(&ft
->fgs_hash
, &rhash_fg
);
696 ft
->node
.type
= FS_TYPE_FLOW_TABLE
;
698 ft
->type
= table_type
;
700 ft
->max_fte
= max_fte
;
702 INIT_LIST_HEAD(&ft
->fwd_rules
);
703 mutex_init(&ft
->lock
);
708 /* If reverse is false, then we search for the first flow table in the
709 * root sub-tree from start(closest from right), else we search for the
710 * last flow table in the root sub-tree till start(closest from left).
712 static struct mlx5_flow_table
*find_closest_ft_recursive(struct fs_node
*root
,
713 struct list_head
*start
,
716 #define list_advance_entry(pos, reverse) \
717 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
719 #define list_for_each_advance_continue(pos, head, reverse) \
720 for (pos = list_advance_entry(pos, reverse); \
721 &pos->list != (head); \
722 pos = list_advance_entry(pos, reverse))
724 struct fs_node
*iter
= list_entry(start
, struct fs_node
, list
);
725 struct mlx5_flow_table
*ft
= NULL
;
727 if (!root
|| root
->type
== FS_TYPE_PRIO_CHAINS
)
730 list_for_each_advance_continue(iter
, &root
->children
, reverse
) {
731 if (iter
->type
== FS_TYPE_FLOW_TABLE
) {
732 fs_get_obj(ft
, iter
);
735 ft
= find_closest_ft_recursive(iter
, &iter
->children
, reverse
);
743 /* If reverse if false then return the first flow table in next priority of
744 * prio in the tree, else return the last flow table in the previous priority
745 * of prio in the tree.
747 static struct mlx5_flow_table
*find_closest_ft(struct fs_prio
*prio
, bool reverse
)
749 struct mlx5_flow_table
*ft
= NULL
;
750 struct fs_node
*curr_node
;
751 struct fs_node
*parent
;
753 parent
= prio
->node
.parent
;
754 curr_node
= &prio
->node
;
755 while (!ft
&& parent
) {
756 ft
= find_closest_ft_recursive(parent
, &curr_node
->list
, reverse
);
758 parent
= curr_node
->parent
;
763 /* Assuming all the tree is locked by mutex chain lock */
764 static struct mlx5_flow_table
*find_next_chained_ft(struct fs_prio
*prio
)
766 return find_closest_ft(prio
, false);
769 /* Assuming all the tree is locked by mutex chain lock */
770 static struct mlx5_flow_table
*find_prev_chained_ft(struct fs_prio
*prio
)
772 return find_closest_ft(prio
, true);
775 static int connect_fts_in_prio(struct mlx5_core_dev
*dev
,
776 struct fs_prio
*prio
,
777 struct mlx5_flow_table
*ft
)
779 struct mlx5_flow_root_namespace
*root
= find_root(&prio
->node
);
780 struct mlx5_flow_table
*iter
;
784 fs_for_each_ft(iter
, prio
) {
786 err
= root
->cmds
->modify_flow_table(dev
, iter
, ft
);
788 mlx5_core_warn(dev
, "Failed to modify flow table %d\n",
790 /* The driver is out of sync with the FW */
799 /* Connect flow tables from previous priority of prio to ft */
800 static int connect_prev_fts(struct mlx5_core_dev
*dev
,
801 struct mlx5_flow_table
*ft
,
802 struct fs_prio
*prio
)
804 struct mlx5_flow_table
*prev_ft
;
806 prev_ft
= find_prev_chained_ft(prio
);
808 struct fs_prio
*prev_prio
;
810 fs_get_obj(prev_prio
, prev_ft
->node
.parent
);
811 return connect_fts_in_prio(dev
, prev_prio
, ft
);
816 static int update_root_ft_create(struct mlx5_flow_table
*ft
, struct fs_prio
819 struct mlx5_flow_root_namespace
*root
= find_root(&prio
->node
);
820 struct mlx5_ft_underlay_qp
*uqp
;
821 int min_level
= INT_MAX
;
826 min_level
= root
->root_ft
->level
;
828 if (ft
->level
>= min_level
)
831 if (list_empty(&root
->underlay_qpns
)) {
832 /* Don't set any QPN (zero) in case QPN list is empty */
834 err
= root
->cmds
->update_root_ft(root
->dev
, ft
, qpn
, false);
836 list_for_each_entry(uqp
, &root
->underlay_qpns
, list
) {
838 err
= root
->cmds
->update_root_ft(root
->dev
, ft
,
846 mlx5_core_warn(root
->dev
,
847 "Update root flow table of id(%u) qpn(%d) failed\n",
855 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule
*rule
,
856 struct mlx5_flow_destination
*dest
)
858 struct mlx5_flow_root_namespace
*root
;
859 struct mlx5_flow_table
*ft
;
860 struct mlx5_flow_group
*fg
;
862 int modify_mask
= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST
);
865 fs_get_obj(fte
, rule
->node
.parent
);
866 if (!(fte
->action
.action
& MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
))
868 down_write_ref_node(&fte
->node
, false);
869 fs_get_obj(fg
, fte
->node
.parent
);
870 fs_get_obj(ft
, fg
->node
.parent
);
872 memcpy(&rule
->dest_attr
, dest
, sizeof(*dest
));
873 root
= find_root(&ft
->node
);
874 err
= root
->cmds
->update_fte(get_dev(&ft
->node
), ft
, fg
->id
,
876 up_write_ref_node(&fte
->node
, false);
881 int mlx5_modify_rule_destination(struct mlx5_flow_handle
*handle
,
882 struct mlx5_flow_destination
*new_dest
,
883 struct mlx5_flow_destination
*old_dest
)
888 if (handle
->num_rules
!= 1)
890 return _mlx5_modify_rule_destination(handle
->rule
[0],
894 for (i
= 0; i
< handle
->num_rules
; i
++) {
895 if (mlx5_flow_dests_cmp(new_dest
, &handle
->rule
[i
]->dest_attr
))
896 return _mlx5_modify_rule_destination(handle
->rule
[i
],
903 /* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
904 static int connect_fwd_rules(struct mlx5_core_dev
*dev
,
905 struct mlx5_flow_table
*new_next_ft
,
906 struct mlx5_flow_table
*old_next_ft
)
908 struct mlx5_flow_destination dest
= {};
909 struct mlx5_flow_rule
*iter
;
912 /* new_next_ft and old_next_ft could be NULL only
913 * when we create/destroy the anchor flow table.
915 if (!new_next_ft
|| !old_next_ft
)
918 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
;
919 dest
.ft
= new_next_ft
;
921 mutex_lock(&old_next_ft
->lock
);
922 list_splice_init(&old_next_ft
->fwd_rules
, &new_next_ft
->fwd_rules
);
923 mutex_unlock(&old_next_ft
->lock
);
924 list_for_each_entry(iter
, &new_next_ft
->fwd_rules
, next_ft
) {
925 err
= _mlx5_modify_rule_destination(iter
, &dest
);
927 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
933 static int connect_flow_table(struct mlx5_core_dev
*dev
, struct mlx5_flow_table
*ft
,
934 struct fs_prio
*prio
)
936 struct mlx5_flow_table
*next_ft
;
939 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
941 if (list_empty(&prio
->node
.children
)) {
942 err
= connect_prev_fts(dev
, ft
, prio
);
946 next_ft
= find_next_chained_ft(prio
);
947 err
= connect_fwd_rules(dev
, ft
, next_ft
);
952 if (MLX5_CAP_FLOWTABLE(dev
,
953 flow_table_properties_nic_receive
.modify_root
))
954 err
= update_root_ft_create(ft
, prio
);
958 static void list_add_flow_table(struct mlx5_flow_table
*ft
,
959 struct fs_prio
*prio
)
961 struct list_head
*prev
= &prio
->node
.children
;
962 struct mlx5_flow_table
*iter
;
964 fs_for_each_ft(iter
, prio
) {
965 if (iter
->level
> ft
->level
)
967 prev
= &iter
->node
.list
;
969 list_add(&ft
->node
.list
, prev
);
972 static struct mlx5_flow_table
*__mlx5_create_flow_table(struct mlx5_flow_namespace
*ns
,
973 struct mlx5_flow_table_attr
*ft_attr
,
974 enum fs_flow_table_op_mod op_mod
,
977 struct mlx5_flow_root_namespace
*root
= find_root(&ns
->node
);
978 struct mlx5_flow_table
*next_ft
= NULL
;
979 struct fs_prio
*fs_prio
= NULL
;
980 struct mlx5_flow_table
*ft
;
985 pr_err("mlx5: flow steering failed to find root of namespace\n");
986 return ERR_PTR(-ENODEV
);
989 mutex_lock(&root
->chain_lock
);
990 fs_prio
= find_prio(ns
, ft_attr
->prio
);
995 if (ft_attr
->level
>= fs_prio
->num_levels
) {
999 /* The level is related to the
1000 * priority level range.
1002 ft_attr
->level
+= fs_prio
->start_level
;
1003 ft
= alloc_flow_table(ft_attr
->level
,
1005 ft_attr
->max_fte
? roundup_pow_of_two(ft_attr
->max_fte
) : 0,
1007 op_mod
, ft_attr
->flags
);
1013 tree_init_node(&ft
->node
, del_hw_flow_table
, del_sw_flow_table
);
1014 log_table_sz
= ft
->max_fte
? ilog2(ft
->max_fte
) : 0;
1015 next_ft
= find_next_chained_ft(fs_prio
);
1016 err
= root
->cmds
->create_flow_table(root
->dev
, ft
->vport
, ft
->op_mod
,
1017 ft
->type
, ft
->level
, log_table_sz
,
1018 next_ft
, &ft
->id
, ft
->flags
);
1022 err
= connect_flow_table(root
->dev
, ft
, fs_prio
);
1025 ft
->node
.active
= true;
1026 down_write_ref_node(&fs_prio
->node
, false);
1027 tree_add_node(&ft
->node
, &fs_prio
->node
);
1028 list_add_flow_table(ft
, fs_prio
);
1030 up_write_ref_node(&fs_prio
->node
, false);
1031 mutex_unlock(&root
->chain_lock
);
1032 trace_mlx5_fs_add_ft(ft
);
1035 root
->cmds
->destroy_flow_table(root
->dev
, ft
);
1039 mutex_unlock(&root
->chain_lock
);
1040 return ERR_PTR(err
);
1043 struct mlx5_flow_table
*mlx5_create_flow_table(struct mlx5_flow_namespace
*ns
,
1044 struct mlx5_flow_table_attr
*ft_attr
)
1046 return __mlx5_create_flow_table(ns
, ft_attr
, FS_FT_OP_MOD_NORMAL
, 0);
1049 struct mlx5_flow_table
*mlx5_create_vport_flow_table(struct mlx5_flow_namespace
*ns
,
1050 int prio
, int max_fte
,
1051 u32 level
, u16 vport
)
1053 struct mlx5_flow_table_attr ft_attr
= {};
1055 ft_attr
.max_fte
= max_fte
;
1056 ft_attr
.level
= level
;
1057 ft_attr
.prio
= prio
;
1059 return __mlx5_create_flow_table(ns
, &ft_attr
, FS_FT_OP_MOD_NORMAL
, vport
);
1062 struct mlx5_flow_table
*
1063 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace
*ns
,
1064 int prio
, u32 level
)
1066 struct mlx5_flow_table_attr ft_attr
= {};
1068 ft_attr
.level
= level
;
1069 ft_attr
.prio
= prio
;
1070 return __mlx5_create_flow_table(ns
, &ft_attr
, FS_FT_OP_MOD_LAG_DEMUX
, 0);
1072 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table
);
1074 struct mlx5_flow_table
*
1075 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace
*ns
,
1077 int num_flow_table_entries
,
1082 struct mlx5_flow_table_attr ft_attr
= {};
1083 struct mlx5_flow_table
*ft
;
1085 if (max_num_groups
> num_flow_table_entries
)
1086 return ERR_PTR(-EINVAL
);
1088 ft_attr
.max_fte
= num_flow_table_entries
;
1089 ft_attr
.prio
= prio
;
1090 ft_attr
.level
= level
;
1091 ft_attr
.flags
= flags
;
1093 ft
= mlx5_create_flow_table(ns
, &ft_attr
);
1097 ft
->autogroup
.active
= true;
1098 ft
->autogroup
.required_groups
= max_num_groups
;
1102 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table
);
1104 struct mlx5_flow_group
*mlx5_create_flow_group(struct mlx5_flow_table
*ft
,
1107 struct mlx5_flow_root_namespace
*root
= find_root(&ft
->node
);
1108 void *match_criteria
= MLX5_ADDR_OF(create_flow_group_in
,
1109 fg_in
, match_criteria
);
1110 u8 match_criteria_enable
= MLX5_GET(create_flow_group_in
,
1112 match_criteria_enable
);
1113 int start_index
= MLX5_GET(create_flow_group_in
, fg_in
,
1115 int end_index
= MLX5_GET(create_flow_group_in
, fg_in
,
1117 struct mlx5_core_dev
*dev
= get_dev(&ft
->node
);
1118 struct mlx5_flow_group
*fg
;
1121 if (ft
->autogroup
.active
)
1122 return ERR_PTR(-EPERM
);
1124 down_write_ref_node(&ft
->node
, false);
1125 fg
= alloc_insert_flow_group(ft
, match_criteria_enable
, match_criteria
,
1126 start_index
, end_index
,
1127 ft
->node
.children
.prev
);
1128 up_write_ref_node(&ft
->node
, false);
1132 err
= root
->cmds
->create_flow_group(dev
, ft
, fg_in
, &fg
->id
);
1134 tree_put_node(&fg
->node
, false);
1135 return ERR_PTR(err
);
1137 trace_mlx5_fs_add_fg(fg
);
1138 fg
->node
.active
= true;
1143 static struct mlx5_flow_rule
*alloc_rule(struct mlx5_flow_destination
*dest
)
1145 struct mlx5_flow_rule
*rule
;
1147 rule
= kzalloc(sizeof(*rule
), GFP_KERNEL
);
1151 INIT_LIST_HEAD(&rule
->next_ft
);
1152 rule
->node
.type
= FS_TYPE_FLOW_DEST
;
1154 memcpy(&rule
->dest_attr
, dest
, sizeof(*dest
));
1159 static struct mlx5_flow_handle
*alloc_handle(int num_rules
)
1161 struct mlx5_flow_handle
*handle
;
1163 handle
= kzalloc(struct_size(handle
, rule
, num_rules
), GFP_KERNEL
);
1167 handle
->num_rules
= num_rules
;
1172 static void destroy_flow_handle(struct fs_fte
*fte
,
1173 struct mlx5_flow_handle
*handle
,
1174 struct mlx5_flow_destination
*dest
,
1178 if (refcount_dec_and_test(&handle
->rule
[i
]->node
.refcount
)) {
1180 list_del(&handle
->rule
[i
]->node
.list
);
1181 kfree(handle
->rule
[i
]);
1187 static struct mlx5_flow_handle
*
1188 create_flow_handle(struct fs_fte
*fte
,
1189 struct mlx5_flow_destination
*dest
,
1194 struct mlx5_flow_handle
*handle
;
1195 struct mlx5_flow_rule
*rule
= NULL
;
1196 static int count
= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS
);
1197 static int dst
= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST
);
1201 handle
= alloc_handle((dest_num
) ? dest_num
: 1);
1203 return ERR_PTR(-ENOMEM
);
1207 rule
= find_flow_rule(fte
, dest
+ i
);
1209 refcount_inc(&rule
->node
.refcount
);
1215 rule
= alloc_rule(dest
+ i
);
1219 /* Add dest to dests list- we need flow tables to be in the
1220 * end of the list for forward to next prio rules.
1222 tree_init_node(&rule
->node
, NULL
, del_sw_hw_rule
);
1224 dest
[i
].type
!= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
)
1225 list_add(&rule
->node
.list
, &fte
->node
.children
);
1227 list_add_tail(&rule
->node
.list
, &fte
->node
.children
);
1231 type
= dest
[i
].type
==
1232 MLX5_FLOW_DESTINATION_TYPE_COUNTER
;
1233 *modify_mask
|= type
? count
: dst
;
1236 handle
->rule
[i
] = rule
;
1237 } while (++i
< dest_num
);
1242 destroy_flow_handle(fte
, handle
, dest
, i
);
1243 return ERR_PTR(-ENOMEM
);
1246 /* fte should not be deleted while calling this function */
1247 static struct mlx5_flow_handle
*
1248 add_rule_fte(struct fs_fte
*fte
,
1249 struct mlx5_flow_group
*fg
,
1250 struct mlx5_flow_destination
*dest
,
1254 struct mlx5_flow_root_namespace
*root
;
1255 struct mlx5_flow_handle
*handle
;
1256 struct mlx5_flow_table
*ft
;
1257 int modify_mask
= 0;
1259 bool new_rule
= false;
1261 handle
= create_flow_handle(fte
, dest
, dest_num
, &modify_mask
,
1263 if (IS_ERR(handle
) || !new_rule
)
1267 modify_mask
|= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION
);
1269 fs_get_obj(ft
, fg
->node
.parent
);
1270 root
= find_root(&fg
->node
);
1271 if (!(fte
->status
& FS_FTE_STATUS_EXISTING
))
1272 err
= root
->cmds
->create_fte(get_dev(&ft
->node
),
1275 err
= root
->cmds
->update_fte(get_dev(&ft
->node
), ft
, fg
->id
,
1280 fte
->node
.active
= true;
1281 fte
->status
|= FS_FTE_STATUS_EXISTING
;
1282 atomic_inc(&fte
->node
.version
);
1288 destroy_flow_handle(fte
, handle
, dest
, handle
->num_rules
);
1289 return ERR_PTR(err
);
1292 static struct mlx5_flow_group
*alloc_auto_flow_group(struct mlx5_flow_table
*ft
,
1293 struct mlx5_flow_spec
*spec
)
1295 struct list_head
*prev
= &ft
->node
.children
;
1296 struct mlx5_flow_group
*fg
;
1297 unsigned int candidate_index
= 0;
1298 unsigned int group_size
= 0;
1300 if (!ft
->autogroup
.active
)
1301 return ERR_PTR(-ENOENT
);
1303 if (ft
->autogroup
.num_groups
< ft
->autogroup
.required_groups
)
1304 /* We save place for flow groups in addition to max types */
1305 group_size
= ft
->max_fte
/ (ft
->autogroup
.required_groups
+ 1);
1307 /* ft->max_fte == ft->autogroup.max_types */
1308 if (group_size
== 0)
1311 /* sorted by start_index */
1312 fs_for_each_fg(fg
, ft
) {
1313 if (candidate_index
+ group_size
> fg
->start_index
)
1314 candidate_index
= fg
->start_index
+ fg
->max_ftes
;
1317 prev
= &fg
->node
.list
;
1320 if (candidate_index
+ group_size
> ft
->max_fte
)
1321 return ERR_PTR(-ENOSPC
);
1323 fg
= alloc_insert_flow_group(ft
,
1324 spec
->match_criteria_enable
,
1325 spec
->match_criteria
,
1327 candidate_index
+ group_size
- 1,
1332 ft
->autogroup
.num_groups
++;
1338 static int create_auto_flow_group(struct mlx5_flow_table
*ft
,
1339 struct mlx5_flow_group
*fg
)
1341 struct mlx5_flow_root_namespace
*root
= find_root(&ft
->node
);
1342 struct mlx5_core_dev
*dev
= get_dev(&ft
->node
);
1343 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
1344 void *match_criteria_addr
;
1345 u8 src_esw_owner_mask_on
;
1350 in
= kvzalloc(inlen
, GFP_KERNEL
);
1354 MLX5_SET(create_flow_group_in
, in
, match_criteria_enable
,
1355 fg
->mask
.match_criteria_enable
);
1356 MLX5_SET(create_flow_group_in
, in
, start_flow_index
, fg
->start_index
);
1357 MLX5_SET(create_flow_group_in
, in
, end_flow_index
, fg
->start_index
+
1360 misc
= MLX5_ADDR_OF(fte_match_param
, fg
->mask
.match_criteria
,
1362 src_esw_owner_mask_on
= !!MLX5_GET(fte_match_set_misc
, misc
,
1363 source_eswitch_owner_vhca_id
);
1364 MLX5_SET(create_flow_group_in
, in
,
1365 source_eswitch_owner_vhca_id_valid
, src_esw_owner_mask_on
);
1367 match_criteria_addr
= MLX5_ADDR_OF(create_flow_group_in
,
1368 in
, match_criteria
);
1369 memcpy(match_criteria_addr
, fg
->mask
.match_criteria
,
1370 sizeof(fg
->mask
.match_criteria
));
1372 err
= root
->cmds
->create_flow_group(dev
, ft
, in
, &fg
->id
);
1374 fg
->node
.active
= true;
1375 trace_mlx5_fs_add_fg(fg
);
1382 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination
*d1
,
1383 struct mlx5_flow_destination
*d2
)
1385 if (d1
->type
== d2
->type
) {
1386 if ((d1
->type
== MLX5_FLOW_DESTINATION_TYPE_VPORT
&&
1387 d1
->vport
.num
== d2
->vport
.num
&&
1388 d1
->vport
.flags
== d2
->vport
.flags
&&
1389 ((d1
->vport
.flags
& MLX5_FLOW_DEST_VPORT_REFORMAT_ID
) ?
1390 (d1
->vport
.reformat_id
== d2
->vport
.reformat_id
) : true)) ||
1391 (d1
->type
== MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
&&
1392 d1
->ft
== d2
->ft
) ||
1393 (d1
->type
== MLX5_FLOW_DESTINATION_TYPE_TIR
&&
1394 d1
->tir_num
== d2
->tir_num
) ||
1395 (d1
->type
== MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM
&&
1396 d1
->ft_num
== d2
->ft_num
))
1403 static struct mlx5_flow_rule
*find_flow_rule(struct fs_fte
*fte
,
1404 struct mlx5_flow_destination
*dest
)
1406 struct mlx5_flow_rule
*rule
;
1408 list_for_each_entry(rule
, &fte
->node
.children
, node
.list
) {
1409 if (mlx5_flow_dests_cmp(&rule
->dest_attr
, dest
))
1415 static bool check_conflicting_actions(u32 action1
, u32 action2
)
1417 u32 xored_actions
= action1
^ action2
;
1419 /* if one rule only wants to count, it's ok */
1420 if (action1
== MLX5_FLOW_CONTEXT_ACTION_COUNT
||
1421 action2
== MLX5_FLOW_CONTEXT_ACTION_COUNT
)
1424 if (xored_actions
& (MLX5_FLOW_CONTEXT_ACTION_DROP
|
1425 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT
|
1426 MLX5_FLOW_CONTEXT_ACTION_DECAP
|
1427 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR
|
1428 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP
|
1429 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH
|
1430 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2
|
1431 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2
))
1437 static int check_conflicting_ftes(struct fs_fte
*fte
, const struct mlx5_flow_act
*flow_act
)
1439 if (check_conflicting_actions(flow_act
->action
, fte
->action
.action
)) {
1440 mlx5_core_warn(get_dev(&fte
->node
),
1441 "Found two FTEs with conflicting actions\n");
1445 if ((flow_act
->flags
& FLOW_ACT_HAS_TAG
) &&
1446 fte
->action
.flow_tag
!= flow_act
->flow_tag
) {
1447 mlx5_core_warn(get_dev(&fte
->node
),
1448 "FTE flow tag %u already exists with different flow tag %u\n",
1449 fte
->action
.flow_tag
,
1450 flow_act
->flow_tag
);
1457 static struct mlx5_flow_handle
*add_rule_fg(struct mlx5_flow_group
*fg
,
1459 struct mlx5_flow_act
*flow_act
,
1460 struct mlx5_flow_destination
*dest
,
1464 struct mlx5_flow_handle
*handle
;
1469 ret
= check_conflicting_ftes(fte
, flow_act
);
1471 return ERR_PTR(ret
);
1473 old_action
= fte
->action
.action
;
1474 fte
->action
.action
|= flow_act
->action
;
1475 handle
= add_rule_fte(fte
, fg
, dest
, dest_num
,
1476 old_action
!= flow_act
->action
);
1477 if (IS_ERR(handle
)) {
1478 fte
->action
.action
= old_action
;
1481 trace_mlx5_fs_set_fte(fte
, false);
1483 for (i
= 0; i
< handle
->num_rules
; i
++) {
1484 if (refcount_read(&handle
->rule
[i
]->node
.refcount
) == 1) {
1485 tree_add_node(&handle
->rule
[i
]->node
, &fte
->node
);
1486 trace_mlx5_fs_add_rule(handle
->rule
[i
]);
1492 static bool counter_is_valid(u32 action
)
1494 return (action
& (MLX5_FLOW_CONTEXT_ACTION_DROP
|
1495 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
));
1498 static bool dest_is_valid(struct mlx5_flow_destination
*dest
,
1500 struct mlx5_flow_table
*ft
)
1502 if (dest
&& (dest
->type
== MLX5_FLOW_DESTINATION_TYPE_COUNTER
))
1503 return counter_is_valid(action
);
1505 if (!(action
& MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
))
1508 if (!dest
|| ((dest
->type
==
1509 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
) &&
1510 (dest
->ft
->level
<= ft
->level
)))
1516 struct list_head list
;
1517 struct mlx5_flow_group
*g
;
1520 struct match_list_head
{
1521 struct list_head list
;
1522 struct match_list first
;
1525 static void free_match_list(struct match_list_head
*head
)
1527 if (!list_empty(&head
->list
)) {
1528 struct match_list
*iter
, *match_tmp
;
1530 list_del(&head
->first
.list
);
1531 tree_put_node(&head
->first
.g
->node
, false);
1532 list_for_each_entry_safe(iter
, match_tmp
, &head
->list
,
1534 tree_put_node(&iter
->g
->node
, false);
1535 list_del(&iter
->list
);
1541 static int build_match_list(struct match_list_head
*match_head
,
1542 struct mlx5_flow_table
*ft
,
1543 struct mlx5_flow_spec
*spec
)
1545 struct rhlist_head
*tmp
, *list
;
1546 struct mlx5_flow_group
*g
;
1550 INIT_LIST_HEAD(&match_head
->list
);
1551 /* Collect all fgs which has a matching match_criteria */
1552 list
= rhltable_lookup(&ft
->fgs_hash
, spec
, rhash_fg
);
1553 /* RCU is atomic, we can't execute FW commands here */
1554 rhl_for_each_entry_rcu(g
, tmp
, list
, hash
) {
1555 struct match_list
*curr_match
;
1557 if (likely(list_empty(&match_head
->list
))) {
1558 if (!tree_get_node(&g
->node
))
1560 match_head
->first
.g
= g
;
1561 list_add_tail(&match_head
->first
.list
,
1566 curr_match
= kmalloc(sizeof(*curr_match
), GFP_ATOMIC
);
1568 free_match_list(match_head
);
1572 if (!tree_get_node(&g
->node
)) {
1577 list_add_tail(&curr_match
->list
, &match_head
->list
);
1584 static u64
matched_fgs_get_version(struct list_head
*match_head
)
1586 struct match_list
*iter
;
1589 list_for_each_entry(iter
, match_head
, list
)
1590 version
+= (u64
)atomic_read(&iter
->g
->node
.version
);
1594 static struct fs_fte
*
1595 lookup_fte_locked(struct mlx5_flow_group
*g
,
1599 struct fs_fte
*fte_tmp
;
1602 nested_down_write_ref_node(&g
->node
, FS_LOCK_PARENT
);
1604 nested_down_read_ref_node(&g
->node
, FS_LOCK_PARENT
);
1605 fte_tmp
= rhashtable_lookup_fast(&g
->ftes_hash
, match_value
,
1607 if (!fte_tmp
|| !tree_get_node(&fte_tmp
->node
)) {
1611 if (!fte_tmp
->node
.active
) {
1612 tree_put_node(&fte_tmp
->node
, false);
1617 nested_down_write_ref_node(&fte_tmp
->node
, FS_LOCK_CHILD
);
1620 up_write_ref_node(&g
->node
, false);
1622 up_read_ref_node(&g
->node
);
1626 static struct mlx5_flow_handle
*
1627 try_add_to_existing_fg(struct mlx5_flow_table
*ft
,
1628 struct list_head
*match_head
,
1629 struct mlx5_flow_spec
*spec
,
1630 struct mlx5_flow_act
*flow_act
,
1631 struct mlx5_flow_destination
*dest
,
1635 struct mlx5_flow_steering
*steering
= get_steering(&ft
->node
);
1636 struct mlx5_flow_group
*g
;
1637 struct mlx5_flow_handle
*rule
;
1638 struct match_list
*iter
;
1639 bool take_write
= false;
1644 fte
= alloc_fte(ft
, spec
->match_value
, flow_act
);
1646 return ERR_PTR(-ENOMEM
);
1648 search_again_locked
:
1649 version
= matched_fgs_get_version(match_head
);
1650 if (flow_act
->flags
& FLOW_ACT_NO_APPEND
)
1652 /* Try to find a fg that already contains a matching fte */
1653 list_for_each_entry(iter
, match_head
, list
) {
1654 struct fs_fte
*fte_tmp
;
1657 fte_tmp
= lookup_fte_locked(g
, spec
->match_value
, take_write
);
1660 rule
= add_rule_fg(g
, spec
->match_value
,
1661 flow_act
, dest
, dest_num
, fte_tmp
);
1662 up_write_ref_node(&fte_tmp
->node
, false);
1663 tree_put_node(&fte_tmp
->node
, false);
1664 kmem_cache_free(steering
->ftes_cache
, fte
);
1669 /* No group with matching fte found, or we skipped the search.
1670 * Try to add a new fte to any matching fg.
1673 /* Check the ft version, for case that new flow group
1674 * was added while the fgs weren't locked
1676 if (atomic_read(&ft
->node
.version
) != ft_version
) {
1677 rule
= ERR_PTR(-EAGAIN
);
1681 /* Check the fgs version, for case the new FTE with the
1682 * same values was added while the fgs weren't locked
1684 if (version
!= matched_fgs_get_version(match_head
)) {
1686 goto search_again_locked
;
1689 list_for_each_entry(iter
, match_head
, list
) {
1692 if (!g
->node
.active
)
1695 nested_down_write_ref_node(&g
->node
, FS_LOCK_PARENT
);
1697 err
= insert_fte(g
, fte
);
1699 up_write_ref_node(&g
->node
, false);
1702 kmem_cache_free(steering
->ftes_cache
, fte
);
1703 return ERR_PTR(err
);
1706 nested_down_write_ref_node(&fte
->node
, FS_LOCK_CHILD
);
1707 up_write_ref_node(&g
->node
, false);
1708 rule
= add_rule_fg(g
, spec
->match_value
,
1709 flow_act
, dest
, dest_num
, fte
);
1710 up_write_ref_node(&fte
->node
, false);
1711 tree_put_node(&fte
->node
, false);
1714 rule
= ERR_PTR(-ENOENT
);
1716 kmem_cache_free(steering
->ftes_cache
, fte
);
1720 static struct mlx5_flow_handle
*
1721 _mlx5_add_flow_rules(struct mlx5_flow_table
*ft
,
1722 struct mlx5_flow_spec
*spec
,
1723 struct mlx5_flow_act
*flow_act
,
1724 struct mlx5_flow_destination
*dest
,
1728 struct mlx5_flow_steering
*steering
= get_steering(&ft
->node
);
1729 struct mlx5_flow_group
*g
;
1730 struct mlx5_flow_handle
*rule
;
1731 struct match_list_head match_head
;
1732 bool take_write
= false;
1738 if (!check_valid_spec(spec
))
1739 return ERR_PTR(-EINVAL
);
1741 for (i
= 0; i
< dest_num
; i
++) {
1742 if (!dest_is_valid(&dest
[i
], flow_act
->action
, ft
))
1743 return ERR_PTR(-EINVAL
);
1745 nested_down_read_ref_node(&ft
->node
, FS_LOCK_GRANDPARENT
);
1746 search_again_locked
:
1747 version
= atomic_read(&ft
->node
.version
);
1749 /* Collect all fgs which has a matching match_criteria */
1750 err
= build_match_list(&match_head
, ft
, spec
);
1753 up_write_ref_node(&ft
->node
, false);
1755 up_read_ref_node(&ft
->node
);
1756 return ERR_PTR(err
);
1760 up_read_ref_node(&ft
->node
);
1762 rule
= try_add_to_existing_fg(ft
, &match_head
.list
, spec
, flow_act
, dest
,
1764 free_match_list(&match_head
);
1765 if (!IS_ERR(rule
) ||
1766 (PTR_ERR(rule
) != -ENOENT
&& PTR_ERR(rule
) != -EAGAIN
)) {
1768 up_write_ref_node(&ft
->node
, false);
1773 nested_down_write_ref_node(&ft
->node
, FS_LOCK_GRANDPARENT
);
1777 if (PTR_ERR(rule
) == -EAGAIN
||
1778 version
!= atomic_read(&ft
->node
.version
))
1779 goto search_again_locked
;
1781 g
= alloc_auto_flow_group(ft
, spec
);
1784 up_write_ref_node(&ft
->node
, false);
1788 nested_down_write_ref_node(&g
->node
, FS_LOCK_PARENT
);
1789 up_write_ref_node(&ft
->node
, false);
1791 err
= create_auto_flow_group(ft
, g
);
1793 goto err_release_fg
;
1795 fte
= alloc_fte(ft
, spec
->match_value
, flow_act
);
1798 goto err_release_fg
;
1801 err
= insert_fte(g
, fte
);
1803 kmem_cache_free(steering
->ftes_cache
, fte
);
1804 goto err_release_fg
;
1807 nested_down_write_ref_node(&fte
->node
, FS_LOCK_CHILD
);
1808 up_write_ref_node(&g
->node
, false);
1809 rule
= add_rule_fg(g
, spec
->match_value
, flow_act
, dest
,
1811 up_write_ref_node(&fte
->node
, false);
1812 tree_put_node(&fte
->node
, false);
1813 tree_put_node(&g
->node
, false);
1817 up_write_ref_node(&g
->node
, false);
1818 tree_put_node(&g
->node
, false);
1819 return ERR_PTR(err
);
1822 static bool fwd_next_prio_supported(struct mlx5_flow_table
*ft
)
1824 return ((ft
->type
== FS_FT_NIC_RX
) &&
1825 (MLX5_CAP_FLOWTABLE(get_dev(&ft
->node
), nic_rx_multi_path_tirs
)));
1828 struct mlx5_flow_handle
*
1829 mlx5_add_flow_rules(struct mlx5_flow_table
*ft
,
1830 struct mlx5_flow_spec
*spec
,
1831 struct mlx5_flow_act
*flow_act
,
1832 struct mlx5_flow_destination
*dest
,
1835 struct mlx5_flow_root_namespace
*root
= find_root(&ft
->node
);
1836 struct mlx5_flow_destination gen_dest
= {};
1837 struct mlx5_flow_table
*next_ft
= NULL
;
1838 struct mlx5_flow_handle
*handle
= NULL
;
1839 u32 sw_action
= flow_act
->action
;
1840 struct fs_prio
*prio
;
1842 fs_get_obj(prio
, ft
->node
.parent
);
1843 if (flow_act
->action
== MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO
) {
1844 if (!fwd_next_prio_supported(ft
))
1845 return ERR_PTR(-EOPNOTSUPP
);
1847 return ERR_PTR(-EINVAL
);
1848 mutex_lock(&root
->chain_lock
);
1849 next_ft
= find_next_chained_ft(prio
);
1851 gen_dest
.type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
;
1852 gen_dest
.ft
= next_ft
;
1855 flow_act
->action
= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
;
1857 mutex_unlock(&root
->chain_lock
);
1858 return ERR_PTR(-EOPNOTSUPP
);
1862 handle
= _mlx5_add_flow_rules(ft
, spec
, flow_act
, dest
, num_dest
);
1864 if (sw_action
== MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO
) {
1865 if (!IS_ERR_OR_NULL(handle
) &&
1866 (list_empty(&handle
->rule
[0]->next_ft
))) {
1867 mutex_lock(&next_ft
->lock
);
1868 list_add(&handle
->rule
[0]->next_ft
,
1869 &next_ft
->fwd_rules
);
1870 mutex_unlock(&next_ft
->lock
);
1871 handle
->rule
[0]->sw_action
= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO
;
1873 mutex_unlock(&root
->chain_lock
);
1877 EXPORT_SYMBOL(mlx5_add_flow_rules
);
1879 void mlx5_del_flow_rules(struct mlx5_flow_handle
*handle
)
1884 /* In order to consolidate the HW changes we lock the FTE for other
1885 * changes, and increase its refcount, in order not to perform the
1886 * "del" functions of the FTE. Will handle them here.
1887 * The removal of the rules is done under locked FTE.
1888 * After removing all the handle's rules, if there are remaining
1889 * rules, it means we just need to modify the FTE in FW, and
1890 * unlock/decrease the refcount we increased before.
1891 * Otherwise, it means the FTE should be deleted. First delete the
1892 * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
1893 * the FTE, which will handle the last decrease of the refcount, as
1894 * well as required handling of its parent.
1896 fs_get_obj(fte
, handle
->rule
[0]->node
.parent
);
1897 down_write_ref_node(&fte
->node
, false);
1898 for (i
= handle
->num_rules
- 1; i
>= 0; i
--)
1899 tree_remove_node(&handle
->rule
[i
]->node
, true);
1900 if (fte
->modify_mask
&& fte
->dests_size
) {
1902 up_write_ref_node(&fte
->node
, false);
1904 del_hw_fte(&fte
->node
);
1905 up_write(&fte
->node
.lock
);
1906 tree_put_node(&fte
->node
, false);
1910 EXPORT_SYMBOL(mlx5_del_flow_rules
);
1912 /* Assuming prio->node.children(flow tables) is sorted by level */
1913 static struct mlx5_flow_table
*find_next_ft(struct mlx5_flow_table
*ft
)
1915 struct fs_prio
*prio
;
1917 fs_get_obj(prio
, ft
->node
.parent
);
1919 if (!list_is_last(&ft
->node
.list
, &prio
->node
.children
))
1920 return list_next_entry(ft
, node
.list
);
1921 return find_next_chained_ft(prio
);
1924 static int update_root_ft_destroy(struct mlx5_flow_table
*ft
)
1926 struct mlx5_flow_root_namespace
*root
= find_root(&ft
->node
);
1927 struct mlx5_ft_underlay_qp
*uqp
;
1928 struct mlx5_flow_table
*new_root_ft
= NULL
;
1932 if (root
->root_ft
!= ft
)
1935 new_root_ft
= find_next_ft(ft
);
1937 root
->root_ft
= NULL
;
1941 if (list_empty(&root
->underlay_qpns
)) {
1942 /* Don't set any QPN (zero) in case QPN list is empty */
1944 err
= root
->cmds
->update_root_ft(root
->dev
, new_root_ft
,
1947 list_for_each_entry(uqp
, &root
->underlay_qpns
, list
) {
1949 err
= root
->cmds
->update_root_ft(root
->dev
,
1958 mlx5_core_warn(root
->dev
,
1959 "Update root flow table of id(%u) qpn(%d) failed\n",
1962 root
->root_ft
= new_root_ft
;
1967 /* Connect flow table from previous priority to
1968 * the next flow table.
1970 static int disconnect_flow_table(struct mlx5_flow_table
*ft
)
1972 struct mlx5_core_dev
*dev
= get_dev(&ft
->node
);
1973 struct mlx5_flow_table
*next_ft
;
1974 struct fs_prio
*prio
;
1977 err
= update_root_ft_destroy(ft
);
1981 fs_get_obj(prio
, ft
->node
.parent
);
1982 if (!(list_first_entry(&prio
->node
.children
,
1983 struct mlx5_flow_table
,
1987 next_ft
= find_next_chained_ft(prio
);
1988 err
= connect_fwd_rules(dev
, next_ft
, ft
);
1992 err
= connect_prev_fts(dev
, next_ft
, prio
);
1994 mlx5_core_warn(dev
, "Failed to disconnect flow table %d\n",
1999 int mlx5_destroy_flow_table(struct mlx5_flow_table
*ft
)
2001 struct mlx5_flow_root_namespace
*root
= find_root(&ft
->node
);
2004 mutex_lock(&root
->chain_lock
);
2005 err
= disconnect_flow_table(ft
);
2007 mutex_unlock(&root
->chain_lock
);
2010 if (tree_remove_node(&ft
->node
, false))
2011 mlx5_core_warn(get_dev(&ft
->node
), "Flow table %d wasn't destroyed, refcount > 1\n",
2013 mutex_unlock(&root
->chain_lock
);
2017 EXPORT_SYMBOL(mlx5_destroy_flow_table
);
2019 void mlx5_destroy_flow_group(struct mlx5_flow_group
*fg
)
2021 if (tree_remove_node(&fg
->node
, false))
2022 mlx5_core_warn(get_dev(&fg
->node
), "Flow group %d wasn't destroyed, refcount > 1\n",
2026 struct mlx5_flow_namespace
*mlx5_get_fdb_sub_ns(struct mlx5_core_dev
*dev
,
2029 struct mlx5_flow_steering
*steering
= dev
->priv
.steering
;
2031 if (!steering
|| !steering
->fdb_sub_ns
)
2034 return steering
->fdb_sub_ns
[n
];
2036 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns
);
2038 struct mlx5_flow_namespace
*mlx5_get_flow_namespace(struct mlx5_core_dev
*dev
,
2039 enum mlx5_flow_namespace_type type
)
2041 struct mlx5_flow_steering
*steering
= dev
->priv
.steering
;
2042 struct mlx5_flow_root_namespace
*root_ns
;
2044 struct fs_prio
*fs_prio
;
2045 struct mlx5_flow_namespace
*ns
;
2051 case MLX5_FLOW_NAMESPACE_FDB
:
2052 if (steering
->fdb_root_ns
)
2053 return &steering
->fdb_root_ns
->ns
;
2055 case MLX5_FLOW_NAMESPACE_SNIFFER_RX
:
2056 if (steering
->sniffer_rx_root_ns
)
2057 return &steering
->sniffer_rx_root_ns
->ns
;
2059 case MLX5_FLOW_NAMESPACE_SNIFFER_TX
:
2060 if (steering
->sniffer_tx_root_ns
)
2061 return &steering
->sniffer_tx_root_ns
->ns
;
2067 if (type
== MLX5_FLOW_NAMESPACE_EGRESS
) {
2068 root_ns
= steering
->egress_root_ns
;
2069 } else { /* Must be NIC RX */
2070 root_ns
= steering
->root_ns
;
2077 fs_prio
= find_prio(&root_ns
->ns
, prio
);
2081 ns
= list_first_entry(&fs_prio
->node
.children
,
2087 EXPORT_SYMBOL(mlx5_get_flow_namespace
);
2089 struct mlx5_flow_namespace
*mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev
*dev
,
2090 enum mlx5_flow_namespace_type type
,
2093 struct mlx5_flow_steering
*steering
= dev
->priv
.steering
;
2095 if (!steering
|| vport
>= MLX5_TOTAL_VPORTS(dev
))
2099 case MLX5_FLOW_NAMESPACE_ESW_EGRESS
:
2100 if (steering
->esw_egress_root_ns
&&
2101 steering
->esw_egress_root_ns
[vport
])
2102 return &steering
->esw_egress_root_ns
[vport
]->ns
;
2105 case MLX5_FLOW_NAMESPACE_ESW_INGRESS
:
2106 if (steering
->esw_ingress_root_ns
&&
2107 steering
->esw_ingress_root_ns
[vport
])
2108 return &steering
->esw_ingress_root_ns
[vport
]->ns
;
2116 static struct fs_prio
*_fs_create_prio(struct mlx5_flow_namespace
*ns
,
2119 enum fs_node_type type
)
2121 struct fs_prio
*fs_prio
;
2123 fs_prio
= kzalloc(sizeof(*fs_prio
), GFP_KERNEL
);
2125 return ERR_PTR(-ENOMEM
);
2127 fs_prio
->node
.type
= type
;
2128 tree_init_node(&fs_prio
->node
, NULL
, del_sw_prio
);
2129 tree_add_node(&fs_prio
->node
, &ns
->node
);
2130 fs_prio
->num_levels
= num_levels
;
2131 fs_prio
->prio
= prio
;
2132 list_add_tail(&fs_prio
->node
.list
, &ns
->node
.children
);
2137 static struct fs_prio
*fs_create_prio_chained(struct mlx5_flow_namespace
*ns
,
2141 return _fs_create_prio(ns
, prio
, num_levels
, FS_TYPE_PRIO_CHAINS
);
2144 static struct fs_prio
*fs_create_prio(struct mlx5_flow_namespace
*ns
,
2145 unsigned int prio
, int num_levels
)
2147 return _fs_create_prio(ns
, prio
, num_levels
, FS_TYPE_PRIO
);
2150 static struct mlx5_flow_namespace
*fs_init_namespace(struct mlx5_flow_namespace
2153 ns
->node
.type
= FS_TYPE_NAMESPACE
;
2158 static struct mlx5_flow_namespace
*fs_create_namespace(struct fs_prio
*prio
)
2160 struct mlx5_flow_namespace
*ns
;
2162 ns
= kzalloc(sizeof(*ns
), GFP_KERNEL
);
2164 return ERR_PTR(-ENOMEM
);
2166 fs_init_namespace(ns
);
2167 tree_init_node(&ns
->node
, NULL
, del_sw_ns
);
2168 tree_add_node(&ns
->node
, &prio
->node
);
2169 list_add_tail(&ns
->node
.list
, &prio
->node
.children
);
2174 static int create_leaf_prios(struct mlx5_flow_namespace
*ns
, int prio
,
2175 struct init_tree_node
*prio_metadata
)
2177 struct fs_prio
*fs_prio
;
2180 for (i
= 0; i
< prio_metadata
->num_leaf_prios
; i
++) {
2181 fs_prio
= fs_create_prio(ns
, prio
++, prio_metadata
->num_levels
);
2182 if (IS_ERR(fs_prio
))
2183 return PTR_ERR(fs_prio
);
2188 #define FLOW_TABLE_BIT_SZ 1
2189 #define GET_FLOW_TABLE_CAP(dev, offset) \
2190 ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
2192 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2193 static bool has_required_caps(struct mlx5_core_dev
*dev
, struct node_caps
*caps
)
2197 for (i
= 0; i
< caps
->arr_sz
; i
++) {
2198 if (!GET_FLOW_TABLE_CAP(dev
, caps
->caps
[i
]))
2204 static int init_root_tree_recursive(struct mlx5_flow_steering
*steering
,
2205 struct init_tree_node
*init_node
,
2206 struct fs_node
*fs_parent_node
,
2207 struct init_tree_node
*init_parent_node
,
2210 int max_ft_level
= MLX5_CAP_FLOWTABLE(steering
->dev
,
2211 flow_table_properties_nic_receive
.
2213 struct mlx5_flow_namespace
*fs_ns
;
2214 struct fs_prio
*fs_prio
;
2215 struct fs_node
*base
;
2219 if (init_node
->type
== FS_TYPE_PRIO
) {
2220 if ((init_node
->min_ft_level
> max_ft_level
) ||
2221 !has_required_caps(steering
->dev
, &init_node
->caps
))
2224 fs_get_obj(fs_ns
, fs_parent_node
);
2225 if (init_node
->num_leaf_prios
)
2226 return create_leaf_prios(fs_ns
, prio
, init_node
);
2227 fs_prio
= fs_create_prio(fs_ns
, prio
, init_node
->num_levels
);
2228 if (IS_ERR(fs_prio
))
2229 return PTR_ERR(fs_prio
);
2230 base
= &fs_prio
->node
;
2231 } else if (init_node
->type
== FS_TYPE_NAMESPACE
) {
2232 fs_get_obj(fs_prio
, fs_parent_node
);
2233 fs_ns
= fs_create_namespace(fs_prio
);
2235 return PTR_ERR(fs_ns
);
2236 base
= &fs_ns
->node
;
2241 for (i
= 0; i
< init_node
->ar_size
; i
++) {
2242 err
= init_root_tree_recursive(steering
, &init_node
->children
[i
],
2243 base
, init_node
, prio
);
2246 if (init_node
->children
[i
].type
== FS_TYPE_PRIO
&&
2247 init_node
->children
[i
].num_leaf_prios
) {
2248 prio
+= init_node
->children
[i
].num_leaf_prios
;
2255 static int init_root_tree(struct mlx5_flow_steering
*steering
,
2256 struct init_tree_node
*init_node
,
2257 struct fs_node
*fs_parent_node
)
2260 struct mlx5_flow_namespace
*fs_ns
;
2263 fs_get_obj(fs_ns
, fs_parent_node
);
2264 for (i
= 0; i
< init_node
->ar_size
; i
++) {
2265 err
= init_root_tree_recursive(steering
, &init_node
->children
[i
],
2274 static struct mlx5_flow_root_namespace
2275 *create_root_ns(struct mlx5_flow_steering
*steering
,
2276 enum fs_flow_table_type table_type
)
2278 const struct mlx5_flow_cmds
*cmds
= mlx5_fs_cmd_get_default(table_type
);
2279 struct mlx5_flow_root_namespace
*root_ns
;
2280 struct mlx5_flow_namespace
*ns
;
2282 if (mlx5_accel_ipsec_device_caps(steering
->dev
) & MLX5_ACCEL_IPSEC_CAP_DEVICE
&&
2283 (table_type
== FS_FT_NIC_RX
|| table_type
== FS_FT_NIC_TX
))
2284 cmds
= mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type
);
2286 /* Create the root namespace */
2287 root_ns
= kvzalloc(sizeof(*root_ns
), GFP_KERNEL
);
2291 root_ns
->dev
= steering
->dev
;
2292 root_ns
->table_type
= table_type
;
2293 root_ns
->cmds
= cmds
;
2295 INIT_LIST_HEAD(&root_ns
->underlay_qpns
);
2298 fs_init_namespace(ns
);
2299 mutex_init(&root_ns
->chain_lock
);
2300 tree_init_node(&ns
->node
, NULL
, NULL
);
2301 tree_add_node(&ns
->node
, NULL
);
2306 static void set_prio_attrs_in_prio(struct fs_prio
*prio
, int acc_level
);
2308 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace
*ns
, int acc_level
)
2310 struct fs_prio
*prio
;
2312 fs_for_each_prio(prio
, ns
) {
2313 /* This updates prio start_level and num_levels */
2314 set_prio_attrs_in_prio(prio
, acc_level
);
2315 acc_level
+= prio
->num_levels
;
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

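/* Assign an absolute start_level to every priority under the root namespace,
 * accumulating the number of levels consumed by the priorities and nested
 * namespaces that precede it.
 */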
static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}

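/* The anchor is a one-entry flow table created at the lowest priority and
 * level of the dedicated MLX5_FLOW_NAMESPACE_ANCHOR namespace, serving as
 * the last table of the NIC RX steering chain.
 */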
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level   = ANCHOR_LEVEL;
	ft_attr.prio    = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
}

static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_egress_acls_root_ns(dev);
	cleanup_ingress_acls_root_ns(dev);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_tx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_rx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}

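/* The FDB root namespace is laid out as one chained priority holding
 * FDB_MAX_CHAIN + 1 sub-namespaces (one per chain), each containing
 * FDB_MAX_PRIO * (chain + 1) two-level priorities, followed by an
 * additional single-level priority.
 */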
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *maj_prio;
	struct fs_prio *min_prio;
	int levels;
	int chain;
	int prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) *
				       (FDB_MAX_CHAIN + 1), GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 0,
					  levels);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
		ns = fs_create_namespace(maj_prio);
		if (IS_ERR(ns)) {
			err = PTR_ERR(ns);
			goto out_err;
		}

		for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
			min_prio = fs_create_prio(ns, prio, 2);
			if (IS_ERR(min_prio)) {
				err = PTR_ERR(min_prio);
				goto out_err;
			}
		}

		steering->fdb_sub_ns[chain] = ns;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	steering->fdb_root_ns = NULL;
	return err;
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio*/
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio*/
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
					       sizeof(*steering->esw_egress_root_ns),
					       GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	return err;
}

static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
						sizeof(*steering->esw_ingress_root_ns),
						GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	return err;
}

static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;

cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}

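/* Top-level flow steering init: allocate the per-device steering context and
 * its FTE/flow-group caches, then create each root namespace (NIC RX, FDB,
 * per-vport ACLs, sniffer RX/TX, NIC TX egress) only when the corresponding
 * device capability reports flow-table support.
 */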
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acls_root_ns(dev);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acls_root_ns(dev);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}

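/* Attach an underlay QPN to the NIC RX root flow table: the QPN is pushed to
 * firmware through the root namespace's update_root_ft() command and tracked
 * on root->underlay_qpns so mlx5_fs_remove_rx_underlay_qpn() can undo it.
 */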
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);