1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35
36 #include "mlx5_core.h"
37 #include "fs_core.h"
38 #include "fs_cmd.h"
39
40 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
41 sizeof(struct init_tree_node))
42
43 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
44 ...) {.type = FS_TYPE_PRIO,\
45 .min_ft_level = min_level_val,\
46 .num_levels = num_levels_val,\
47 .num_leaf_prios = num_prios_val,\
48 .caps = caps_val,\
49 .children = (struct init_tree_node[]) {__VA_ARGS__},\
50 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
51 }
52
53 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
54 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
55 __VA_ARGS__)\
56
57 #define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
58 .children = (struct init_tree_node[]) {__VA_ARGS__},\
59 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
60 }
61
62 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
63 sizeof(long))
64
65 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
66
67 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
68 .caps = (long[]) {__VA_ARGS__} }
69
70 #define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
71 FS_CAP(flow_table_properties_nic_receive.modify_root), \
72 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
73 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
74
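/*
 * The ADD_PRIO()/ADD_NS()/ADD_MULTIPLE_PRIO() helpers above build the static
 * init_tree_node tree (root_fs) below at compile time. Illustrative expansion
 * (hypothetical values, not part of the real tree):
 *
 *	ADD_PRIO(0, 4, 0, {}, ADD_NS(ADD_MULTIPLE_PRIO(1, 2)))
 *
 * yields a FS_TYPE_PRIO node with min_ft_level = 4 whose only child is a
 * FS_TYPE_NAMESPACE node holding one leaf FS_TYPE_PRIO with num_leaf_prios = 1
 * and num_levels = 2; INIT_TREE_NODE_ARRAY_SIZE() fills in ar_size from the
 * number of children passed through __VA_ARGS__.
 */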
75 #define LEFTOVERS_NUM_LEVELS 1
76 #define LEFTOVERS_NUM_PRIOS 1
77
78 #define BY_PASS_PRIO_NUM_LEVELS 1
79 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
80 LEFTOVERS_NUM_PRIOS)
81
82 #define ETHTOOL_PRIO_NUM_LEVELS 1
83 #define ETHTOOL_NUM_PRIOS 11
84 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
85 /* Vlan, mac, ttc, aRFS */
86 #define KERNEL_NIC_PRIO_NUM_LEVELS 4
87 #define KERNEL_NIC_NUM_PRIOS 1
88 /* One more level for tc */
89 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
90
91 #define ANCHOR_NUM_LEVELS 1
92 #define ANCHOR_NUM_PRIOS 1
93 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
94
95 #define OFFLOADS_MAX_FT 1
96 #define OFFLOADS_NUM_PRIOS 1
97 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
98
99 #define LAG_PRIO_NUM_LEVELS 1
100 #define LAG_NUM_PRIOS 1
101 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
102
103 struct node_caps {
104 size_t arr_sz;
105 long *caps;
106 };
107 static struct init_tree_node {
108 enum fs_node_type type;
109 struct init_tree_node *children;
110 int ar_size;
111 struct node_caps caps;
112 int min_ft_level;
113 int num_leaf_prios;
114 int prio;
115 int num_levels;
116 } root_fs = {
117 .type = FS_TYPE_NAMESPACE,
118 .ar_size = 7,
119 .children = (struct init_tree_node[]) {
120 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
121 FS_CHAINING_CAPS,
122 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
123 BY_PASS_PRIO_NUM_LEVELS))),
124 ADD_PRIO(0, LAG_MIN_LEVEL, 0,
125 FS_CHAINING_CAPS,
126 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
127 LAG_PRIO_NUM_LEVELS))),
128 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
129 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
130 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
131 FS_CHAINING_CAPS,
132 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
133 ETHTOOL_PRIO_NUM_LEVELS))),
134 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
135 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
136 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
137 KERNEL_NIC_PRIO_NUM_LEVELS))),
138 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
139 FS_CHAINING_CAPS,
140 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
141 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
142 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
143 }
144 };
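/*
 * The resulting NIC RX layout, in tree (and therefore level) order, is:
 * bypass -> lag -> offloads -> ethtool -> kernel (tc + nic sub-prios) ->
 * leftovers -> anchor. init_root_tree() instantiates this description and
 * set_prio_attrs() later assigns each fs_prio its start_level/num_levels.
 */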
145
146 enum fs_i_mutex_lock_class {
147 FS_MUTEX_GRANDPARENT,
148 FS_MUTEX_PARENT,
149 FS_MUTEX_CHILD
150 };
151
152 static void del_rule(struct fs_node *node);
153 static void del_flow_table(struct fs_node *node);
154 static void del_flow_group(struct fs_node *node);
155 static void del_fte(struct fs_node *node);
156 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
157 struct mlx5_flow_destination *d2);
158 static struct mlx5_flow_rule *
159 find_flow_rule(struct fs_fte *fte,
160 struct mlx5_flow_destination *dest);
161
162 static void tree_init_node(struct fs_node *node,
163 unsigned int refcount,
164 void (*remove_func)(struct fs_node *))
165 {
166 atomic_set(&node->refcount, refcount);
167 INIT_LIST_HEAD(&node->list);
168 INIT_LIST_HEAD(&node->children);
169 mutex_init(&node->lock);
170 node->remove_func = remove_func;
171 }
172
173 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
174 {
175 if (parent)
176 atomic_inc(&parent->refcount);
177 node->parent = parent;
178
179 /* Parent is the root */
180 if (!parent)
181 node->root = node;
182 else
183 node->root = parent->root;
184 }
185
186 static void tree_get_node(struct fs_node *node)
187 {
188 atomic_inc(&node->refcount);
189 }
190
191 static void nested_lock_ref_node(struct fs_node *node,
192 enum fs_i_mutex_lock_class class)
193 {
194 if (node) {
195 mutex_lock_nested(&node->lock, class);
196 atomic_inc(&node->refcount);
197 }
198 }
199
200 static void lock_ref_node(struct fs_node *node)
201 {
202 if (node) {
203 mutex_lock(&node->lock);
204 atomic_inc(&node->refcount);
205 }
206 }
207
208 static void unlock_ref_node(struct fs_node *node)
209 {
210 if (node) {
211 atomic_dec(&node->refcount);
212 mutex_unlock(&node->lock);
213 }
214 }
215
216 static void tree_put_node(struct fs_node *node)
217 {
218 struct fs_node *parent_node = node->parent;
219
220 lock_ref_node(parent_node);
221 if (atomic_dec_and_test(&node->refcount)) {
222 if (parent_node)
223 list_del_init(&node->list);
224 if (node->remove_func)
225 node->remove_func(node);
226 kfree(node);
227 node = NULL;
228 }
229 unlock_ref_node(parent_node);
230 if (!node && parent_node)
231 tree_put_node(parent_node);
232 }
233
234 static int tree_remove_node(struct fs_node *node)
235 {
236 if (atomic_read(&node->refcount) > 1) {
237 atomic_dec(&node->refcount);
238 return -EEXIST;
239 }
240 tree_put_node(node);
241 return 0;
242 }
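/*
 * Reference-count discipline used throughout this file: tree_add_node() makes
 * a child hold a reference on its parent, tree_get_node()/tree_put_node()
 * take and drop references, and a node is unlinked and freed (through its
 * remove_func) only when the last reference goes away under the parent's
 * lock; tree_remove_node() refuses (-EEXIST) if somebody else still holds a
 * reference. Illustrative pattern (sketch only):
 *
 *	tree_get_node(&fg->node);	// pin the flow group
 *	...				// safe to dereference fg here
 *	tree_put_node(&fg->node);	// may free fg and drop the parent ref
 */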
243
244 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
245 unsigned int prio)
246 {
247 struct fs_prio *iter_prio;
248
249 fs_for_each_prio(iter_prio, ns) {
250 if (iter_prio->prio == prio)
251 return iter_prio;
252 }
253
254 return NULL;
255 }
256
257 static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
258 {
259 unsigned int i;
260
261 for (i = 0; i < size; i++, mask++, val1++, val2++)
262 if ((*((u8 *)val1) & (*(u8 *)mask)) !=
263 ((*(u8 *)val2) & (*(u8 *)mask)))
264 return false;
265
266 return true;
267 }
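/*
 * masked_memcmp() compares val1 and val2 only on bits that are set in mask.
 * Tiny worked example (single byte): mask = 0xf0, val1 = 0x1a, val2 = 0x15
 * compare equal, since only the high nibble (0x1 in both) is inspected and
 * the low nibble is ignored.
 */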
268
269 static bool compare_match_value(struct mlx5_flow_group_mask *mask,
270 void *fte_param1, void *fte_param2)
271 {
272 if (mask->match_criteria_enable &
273 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
274 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
275 fte_param1, outer_headers);
276 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
277 fte_param2, outer_headers);
278 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
279 mask->match_criteria, outer_headers);
280
281 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
282 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
283 return false;
284 }
285
286 if (mask->match_criteria_enable &
287 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
288 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
289 fte_param1, misc_parameters);
290 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
291 fte_param2, misc_parameters);
292 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
293 mask->match_criteria, misc_parameters);
294
295 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
296 MLX5_ST_SZ_BYTES(fte_match_set_misc)))
297 return false;
298 }
299
300 if (mask->match_criteria_enable &
301 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
302 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
303 fte_param1, inner_headers);
304 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
305 fte_param2, inner_headers);
306 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
307 mask->match_criteria, inner_headers);
308
309 if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
310 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
311 return false;
312 }
313 return true;
314 }
315
316 static bool compare_match_criteria(u8 match_criteria_enable1,
317 u8 match_criteria_enable2,
318 void *mask1, void *mask2)
319 {
320 return match_criteria_enable1 == match_criteria_enable2 &&
321 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
322 }
323
324 static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
325 {
326 struct fs_node *root;
327 struct mlx5_flow_namespace *ns;
328
329 root = node->root;
330
331 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
332 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
333 return NULL;
334 }
335
336 ns = container_of(root, struct mlx5_flow_namespace, node);
337 return container_of(ns, struct mlx5_flow_root_namespace, ns);
338 }
339
340 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
341 {
342 struct mlx5_flow_root_namespace *root = find_root(node);
343
344 if (root)
345 return root->dev;
346 return NULL;
347 }
348
349 static void del_flow_table(struct fs_node *node)
350 {
351 struct mlx5_flow_table *ft;
352 struct mlx5_core_dev *dev;
353 struct fs_prio *prio;
354 int err;
355
356 fs_get_obj(ft, node);
357 dev = get_dev(&ft->node);
358
359 err = mlx5_cmd_destroy_flow_table(dev, ft);
360 if (err)
361 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
362 fs_get_obj(prio, ft->node.parent);
363 prio->num_ft--;
364 }
365
366 static void del_rule(struct fs_node *node)
367 {
368 struct mlx5_flow_rule *rule;
369 struct mlx5_flow_table *ft;
370 struct mlx5_flow_group *fg;
371 struct fs_fte *fte;
372 u32 *match_value;
373 int modify_mask;
374 struct mlx5_core_dev *dev = get_dev(node);
375 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
376 int err;
377 bool update_fte = false;
378
379 match_value = mlx5_vzalloc(match_len);
380 if (!match_value) {
381 mlx5_core_warn(dev, "failed to allocate inbox\n");
382 return;
383 }
384
385 fs_get_obj(rule, node);
386 fs_get_obj(fte, rule->node.parent);
387 fs_get_obj(fg, fte->node.parent);
388 memcpy(match_value, fte->val, sizeof(fte->val));
389 fs_get_obj(ft, fg->node.parent);
390 list_del(&rule->node.list);
391 if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
392 mutex_lock(&rule->dest_attr.ft->lock);
393 list_del(&rule->next_ft);
394 mutex_unlock(&rule->dest_attr.ft->lock);
395 }
396
397 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
398 --fte->dests_size) {
399 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
400 fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
401 update_fte = true;
402 goto out;
403 }
404
405 if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
406 --fte->dests_size) {
407 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
408 update_fte = true;
409 }
410 out:
411 if (update_fte && fte->dests_size) {
412 err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
413 if (err)
414 mlx5_core_warn(dev,
415 "%s can't del rule fg id=%d fte_index=%d\n",
416 __func__, fg->id, fte->index);
417 }
418 kvfree(match_value);
419 }
420
421 static void del_fte(struct fs_node *node)
422 {
423 struct mlx5_flow_table *ft;
424 struct mlx5_flow_group *fg;
425 struct mlx5_core_dev *dev;
426 struct fs_fte *fte;
427 int err;
428
429 fs_get_obj(fte, node);
430 fs_get_obj(fg, fte->node.parent);
431 fs_get_obj(ft, fg->node.parent);
432
433 dev = get_dev(&ft->node);
434 err = mlx5_cmd_delete_fte(dev, ft,
435 fte->index);
436 if (err)
437 mlx5_core_warn(dev,
438 "flow steering can't delete fte in index %d of flow group id %d\n",
439 fte->index, fg->id);
440
441 fte->status = 0;
442 fg->num_ftes--;
443 }
444
445 static void del_flow_group(struct fs_node *node)
446 {
447 struct mlx5_flow_group *fg;
448 struct mlx5_flow_table *ft;
449 struct mlx5_core_dev *dev;
450
451 fs_get_obj(fg, node);
452 fs_get_obj(ft, fg->node.parent);
453 dev = get_dev(&ft->node);
454
455 if (ft->autogroup.active)
456 ft->autogroup.num_groups--;
457
458 if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
459 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
460 fg->id, ft->id);
461 }
462
463 static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
464 u32 *match_value,
465 unsigned int index)
466 {
467 struct fs_fte *fte;
468
469 fte = kzalloc(sizeof(*fte), GFP_KERNEL);
470 if (!fte)
471 return ERR_PTR(-ENOMEM);
472
473 memcpy(fte->val, match_value, sizeof(fte->val));
474 fte->node.type = FS_TYPE_FLOW_ENTRY;
475 fte->flow_tag = flow_act->flow_tag;
476 fte->index = index;
477 fte->action = flow_act->action;
478 fte->encap_id = flow_act->encap_id;
479
480 return fte;
481 }
482
483 static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
484 {
485 struct mlx5_flow_group *fg;
486 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
487 create_fg_in, match_criteria);
488 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
489 create_fg_in,
490 match_criteria_enable);
491 fg = kzalloc(sizeof(*fg), GFP_KERNEL);
492 if (!fg)
493 return ERR_PTR(-ENOMEM);
494
495 fg->mask.match_criteria_enable = match_criteria_enable;
496 memcpy(&fg->mask.match_criteria, match_criteria,
497 sizeof(fg->mask.match_criteria));
498 fg->node.type = FS_TYPE_FLOW_GROUP;
499 fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
500 start_flow_index);
501 fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
502 end_flow_index) - fg->start_index + 1;
503 return fg;
504 }
505
506 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
507 enum fs_flow_table_type table_type,
508 enum fs_flow_table_op_mod op_mod,
509 u32 flags)
510 {
511 struct mlx5_flow_table *ft;
512
513 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
514 if (!ft)
515 return NULL;
516
517 ft->level = level;
518 ft->node.type = FS_TYPE_FLOW_TABLE;
519 ft->op_mod = op_mod;
520 ft->type = table_type;
521 ft->vport = vport;
522 ft->max_fte = max_fte;
523 ft->flags = flags;
524 INIT_LIST_HEAD(&ft->fwd_rules);
525 mutex_init(&ft->lock);
526
527 return ft;
528 }
529
530 /* If reverse is false, then we search for the first flow table in the
531 * root sub-tree from start (closest from right); else we search for the
532 * last flow table in the root sub-tree up to start (closest from left).
533 */
534 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
535 struct list_head *start,
536 bool reverse)
537 {
538 #define list_advance_entry(pos, reverse) \
539 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
540
541 #define list_for_each_advance_continue(pos, head, reverse) \
542 for (pos = list_advance_entry(pos, reverse); \
543 &pos->list != (head); \
544 pos = list_advance_entry(pos, reverse))
545
546 struct fs_node *iter = list_entry(start, struct fs_node, list);
547 struct mlx5_flow_table *ft = NULL;
548
549 if (!root)
550 return NULL;
551
552 list_for_each_advance_continue(iter, &root->children, reverse) {
553 if (iter->type == FS_TYPE_FLOW_TABLE) {
554 fs_get_obj(ft, iter);
555 return ft;
556 }
557 ft = find_closest_ft_recursive(iter, &iter->children, reverse);
558 if (ft)
559 return ft;
560 }
561
562 return ft;
563 }
564
565 /* If reverse is false, then return the first flow table in the next priority of
566 * prio in the tree, else return the last flow table in the previous priority
567 * of prio in the tree.
568 */
569 static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
570 {
571 struct mlx5_flow_table *ft = NULL;
572 struct fs_node *curr_node;
573 struct fs_node *parent;
574
575 parent = prio->node.parent;
576 curr_node = &prio->node;
577 while (!ft && parent) {
578 ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
579 curr_node = parent;
580 parent = curr_node->parent;
581 }
582 return ft;
583 }
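/*
 * find_next_chained_ft()/find_prev_chained_ft() below use this walk to find
 * the flow table that logically follows or precedes a priority anywhere in
 * the root sub-tree. connect_flow_table()/connect_fwd_rules() rely on it to
 * wire a newly created table between its neighbours so lookups fall through
 * priorities in level order.
 */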
584
585 /* Assuming all the tree is locked by mutex chain lock */
586 static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
587 {
588 return find_closest_ft(prio, false);
589 }
590
591 /* Assuming all the tree is locked by mutex chain lock */
592 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
593 {
594 return find_closest_ft(prio, true);
595 }
596
597 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
598 struct fs_prio *prio,
599 struct mlx5_flow_table *ft)
600 {
601 struct mlx5_flow_table *iter;
602 int i = 0;
603 int err;
604
605 fs_for_each_ft(iter, prio) {
606 i++;
607 err = mlx5_cmd_modify_flow_table(dev,
608 iter,
609 ft);
610 if (err) {
611 mlx5_core_warn(dev, "Failed to modify flow table %d\n",
612 iter->id);
613 /* The driver is out of sync with the FW */
614 if (i > 1)
615 WARN_ON(true);
616 return err;
617 }
618 }
619 return 0;
620 }
621
622 /* Connect flow tables from previous priority of prio to ft */
623 static int connect_prev_fts(struct mlx5_core_dev *dev,
624 struct mlx5_flow_table *ft,
625 struct fs_prio *prio)
626 {
627 struct mlx5_flow_table *prev_ft;
628
629 prev_ft = find_prev_chained_ft(prio);
630 if (prev_ft) {
631 struct fs_prio *prev_prio;
632
633 fs_get_obj(prev_prio, prev_ft->node.parent);
634 return connect_fts_in_prio(dev, prev_prio, ft);
635 }
636 return 0;
637 }
638
639 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
640 *prio)
641 {
642 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
643 int min_level = INT_MAX;
644 int err;
645
646 if (root->root_ft)
647 min_level = root->root_ft->level;
648
649 if (ft->level >= min_level)
650 return 0;
651
652 err = mlx5_cmd_update_root_ft(root->dev, ft);
653 if (err)
654 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
655 ft->id);
656 else
657 root->root_ft = ft;
658
659 return err;
660 }
661
662 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
663 struct mlx5_flow_destination *dest)
664 {
665 struct mlx5_flow_table *ft;
666 struct mlx5_flow_group *fg;
667 struct fs_fte *fte;
668 int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
669 int err = 0;
670
671 fs_get_obj(fte, rule->node.parent);
672 if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
673 return -EINVAL;
674 lock_ref_node(&fte->node);
675 fs_get_obj(fg, fte->node.parent);
676 fs_get_obj(ft, fg->node.parent);
677
678 memcpy(&rule->dest_attr, dest, sizeof(*dest));
679 err = mlx5_cmd_update_fte(get_dev(&ft->node),
680 ft, fg->id,
681 modify_mask,
682 fte);
683 unlock_ref_node(&fte->node);
684
685 return err;
686 }
687
688 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
689 struct mlx5_flow_destination *new_dest,
690 struct mlx5_flow_destination *old_dest)
691 {
692 int i;
693
694 if (!old_dest) {
695 if (handle->num_rules != 1)
696 return -EINVAL;
697 return _mlx5_modify_rule_destination(handle->rule[0],
698 new_dest);
699 }
700
701 for (i = 0; i < handle->num_rules; i++) {
702 if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
703 return _mlx5_modify_rule_destination(handle->rule[i],
704 new_dest);
705 }
706
707 return -EINVAL;
708 }
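/*
 * Illustrative use of mlx5_modify_rule_destination() (sketch; new_tirn is a
 * hypothetical TIR number):
 *
 *	struct mlx5_flow_destination new_dest = {
 *		.type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = new_tirn,
 *	};
 *
 *	err = mlx5_modify_rule_destination(handle, &new_dest, NULL);
 *
 * With old_dest == NULL the handle must contain exactly one rule, which is
 * retargeted to new_dest; otherwise the matching rule inside the handle is
 * updated.
 */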
709
710 /* Modify/set FWD rules that point to old_next_ft so that they point to new_next_ft */
711 static int connect_fwd_rules(struct mlx5_core_dev *dev,
712 struct mlx5_flow_table *new_next_ft,
713 struct mlx5_flow_table *old_next_ft)
714 {
715 struct mlx5_flow_destination dest;
716 struct mlx5_flow_rule *iter;
717 int err = 0;
718
719 /* new_next_ft and old_next_ft could be NULL only
720 * when we create/destroy the anchor flow table.
721 */
722 if (!new_next_ft || !old_next_ft)
723 return 0;
724
725 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
726 dest.ft = new_next_ft;
727
728 mutex_lock(&old_next_ft->lock);
729 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
730 mutex_unlock(&old_next_ft->lock);
731 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
732 err = _mlx5_modify_rule_destination(iter, &dest);
733 if (err)
734 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
735 new_next_ft->id);
736 }
737 return 0;
738 }
739
740 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
741 struct fs_prio *prio)
742 {
743 struct mlx5_flow_table *next_ft;
744 int err = 0;
745
746 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
747
748 if (list_empty(&prio->node.children)) {
749 err = connect_prev_fts(dev, ft, prio);
750 if (err)
751 return err;
752
753 next_ft = find_next_chained_ft(prio);
754 err = connect_fwd_rules(dev, ft, next_ft);
755 if (err)
756 return err;
757 }
758
759 if (MLX5_CAP_FLOWTABLE(dev,
760 flow_table_properties_nic_receive.modify_root))
761 err = update_root_ft_create(ft, prio);
762 return err;
763 }
764
765 static void list_add_flow_table(struct mlx5_flow_table *ft,
766 struct fs_prio *prio)
767 {
768 struct list_head *prev = &prio->node.children;
769 struct mlx5_flow_table *iter;
770
771 fs_for_each_ft(iter, prio) {
772 if (iter->level > ft->level)
773 break;
774 prev = &iter->node.list;
775 }
776 list_add(&ft->node.list, prev);
777 }
778
779 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
780 enum fs_flow_table_op_mod op_mod,
781 u16 vport, int prio,
782 int max_fte, u32 level,
783 u32 flags)
784 {
785 struct mlx5_flow_table *next_ft = NULL;
786 struct mlx5_flow_table *ft;
787 int err;
788 int log_table_sz;
789 struct mlx5_flow_root_namespace *root =
790 find_root(&ns->node);
791 struct fs_prio *fs_prio = NULL;
792
793 if (!root) {
794 pr_err("mlx5: flow steering failed to find root of namespace\n");
795 return ERR_PTR(-ENODEV);
796 }
797
798 mutex_lock(&root->chain_lock);
799 fs_prio = find_prio(ns, prio);
800 if (!fs_prio) {
801 err = -EINVAL;
802 goto unlock_root;
803 }
804 if (level >= fs_prio->num_levels) {
805 err = -ENOSPC;
806 goto unlock_root;
807 }
808 /* The level is related to the
809 * priority level range.
810 */
811 level += fs_prio->start_level;
812 ft = alloc_flow_table(level,
813 vport,
814 max_fte ? roundup_pow_of_two(max_fte) : 0,
815 root->table_type,
816 op_mod, flags);
817 if (!ft) {
818 err = -ENOMEM;
819 goto unlock_root;
820 }
821
822 tree_init_node(&ft->node, 1, del_flow_table);
823 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
824 next_ft = find_next_chained_ft(fs_prio);
825 err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
826 ft->level, log_table_sz, next_ft, &ft->id,
827 ft->flags);
828 if (err)
829 goto free_ft;
830
831 err = connect_flow_table(root->dev, ft, fs_prio);
832 if (err)
833 goto destroy_ft;
834 lock_ref_node(&fs_prio->node);
835 tree_add_node(&ft->node, &fs_prio->node);
836 list_add_flow_table(ft, fs_prio);
837 fs_prio->num_ft++;
838 unlock_ref_node(&fs_prio->node);
839 mutex_unlock(&root->chain_lock);
840 return ft;
841 destroy_ft:
842 mlx5_cmd_destroy_flow_table(root->dev, ft);
843 free_ft:
844 kfree(ft);
845 unlock_root:
846 mutex_unlock(&root->chain_lock);
847 return ERR_PTR(err);
848 }
849
850 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
851 int prio, int max_fte,
852 u32 level,
853 u32 flags)
854 {
855 return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
856 max_fte, level, flags);
857 }
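/*
 * Illustrative creation of a table in a steering namespace (sketch; the
 * priority, size and level values here are hypothetical):
 *
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft = mlx5_create_flow_table(ns, 0, 1024, 0, 0);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */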
858
859 struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
860 int prio, int max_fte,
861 u32 level, u16 vport)
862 {
863 return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
864 max_fte, level, 0);
865 }
866
867 struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
868 struct mlx5_flow_namespace *ns,
869 int prio, u32 level)
870 {
871 return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
872 level, 0);
873 }
874 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
875
876 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
877 int prio,
878 int num_flow_table_entries,
879 int max_num_groups,
880 u32 level,
881 u32 flags)
882 {
883 struct mlx5_flow_table *ft;
884
885 if (max_num_groups > num_flow_table_entries)
886 return ERR_PTR(-EINVAL);
887
888 ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags);
889 if (IS_ERR(ft))
890 return ft;
891
892 ft->autogroup.active = true;
893 ft->autogroup.required_groups = max_num_groups;
894
895 return ft;
896 }
897 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
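/*
 * Unlike mlx5_create_flow_table(), the auto-grouped variant above lets
 * create_autogroup() carve flow groups on demand: max_num_groups bounds how
 * many groups may be cut out of num_flow_table_entries. Illustrative call
 * (hypothetical sizes):
 *
 *	ft = mlx5_create_auto_grouped_flow_table(ns, 0, 4096, 4, 0, 0);
 */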
898
899 /* Flow table should be locked */
900 static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
901 u32 *fg_in,
902 struct list_head
903 *prev_fg,
904 bool is_auto_fg)
905 {
906 struct mlx5_flow_group *fg;
907 struct mlx5_core_dev *dev = get_dev(&ft->node);
908 int err;
909
910 if (!dev)
911 return ERR_PTR(-ENODEV);
912
913 fg = alloc_flow_group(fg_in);
914 if (IS_ERR(fg))
915 return fg;
916
917 err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
918 if (err) {
919 kfree(fg);
920 return ERR_PTR(err);
921 }
922
923 if (ft->autogroup.active)
924 ft->autogroup.num_groups++;
925 /* Add node to tree */
926 tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
927 tree_add_node(&fg->node, &ft->node);
928 /* Add node to group list */
929 list_add(&fg->node.list, prev_fg);
930
931 return fg;
932 }
933
934 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
935 u32 *fg_in)
936 {
937 struct mlx5_flow_group *fg;
938
939 if (ft->autogroup.active)
940 return ERR_PTR(-EPERM);
941
942 lock_ref_node(&ft->node);
943 fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
944 unlock_ref_node(&ft->node);
945
946 return fg;
947 }
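/*
 * Manual group creation (above) is only allowed on tables that were not
 * created as auto-grouped. Illustrative construction of fg_in (sketch;
 * matches on the outer ethertype, error handling trimmed):
 *
 *	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 *	u32 *in = mlx5_vzalloc(inlen);
 *	void *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
 *
 *	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
 *		 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS);
 *	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, mc, ethertype);
 *	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
 *	MLX5_SET(create_flow_group_in, in, end_flow_index, 15);
 *	fg = mlx5_create_flow_group(ft, in);
 *	kvfree(in);
 */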
948
949 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
950 {
951 struct mlx5_flow_rule *rule;
952
953 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
954 if (!rule)
955 return NULL;
956
957 INIT_LIST_HEAD(&rule->next_ft);
958 rule->node.type = FS_TYPE_FLOW_DEST;
959 if (dest)
960 memcpy(&rule->dest_attr, dest, sizeof(*dest));
961
962 return rule;
963 }
964
965 static struct mlx5_flow_handle *alloc_handle(int num_rules)
966 {
967 struct mlx5_flow_handle *handle;
968
969 handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) *
970 num_rules, GFP_KERNEL);
971 if (!handle)
972 return NULL;
973
974 handle->num_rules = num_rules;
975
976 return handle;
977 }
978
979 static void destroy_flow_handle(struct fs_fte *fte,
980 struct mlx5_flow_handle *handle,
981 struct mlx5_flow_destination *dest,
982 int i)
983 {
984 for (; --i >= 0;) {
985 if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
986 fte->dests_size--;
987 list_del(&handle->rule[i]->node.list);
988 kfree(handle->rule[i]);
989 }
990 }
991 kfree(handle);
992 }
993
994 static struct mlx5_flow_handle *
995 create_flow_handle(struct fs_fte *fte,
996 struct mlx5_flow_destination *dest,
997 int dest_num,
998 int *modify_mask,
999 bool *new_rule)
1000 {
1001 struct mlx5_flow_handle *handle;
1002 struct mlx5_flow_rule *rule = NULL;
1003 static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1004 static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1005 int type;
1006 int i = 0;
1007
1008 handle = alloc_handle((dest_num) ? dest_num : 1);
1009 if (!handle)
1010 return ERR_PTR(-ENOMEM);
1011
1012 do {
1013 if (dest) {
1014 rule = find_flow_rule(fte, dest + i);
1015 if (rule) {
1016 atomic_inc(&rule->node.refcount);
1017 goto rule_found;
1018 }
1019 }
1020
1021 *new_rule = true;
1022 rule = alloc_rule(dest + i);
1023 if (!rule)
1024 goto free_rules;
1025
1026 /* Add dest to dests list - we need flow tables to be at the
1027 * end of the list for forward-to-next-prio rules.
1028 */
1029 tree_init_node(&rule->node, 1, del_rule);
1030 if (dest &&
1031 dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1032 list_add(&rule->node.list, &fte->node.children);
1033 else
1034 list_add_tail(&rule->node.list, &fte->node.children);
1035 if (dest) {
1036 fte->dests_size++;
1037
1038 type = dest[i].type ==
1039 MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1040 *modify_mask |= type ? count : dst;
1041 }
1042 rule_found:
1043 handle->rule[i] = rule;
1044 } while (++i < dest_num);
1045
1046 return handle;
1047
1048 free_rules:
1049 destroy_flow_handle(fte, handle, dest, i);
1050 return ERR_PTR(-ENOMEM);
1051 }
1052
1053 /* fte should not be deleted while calling this function */
1054 static struct mlx5_flow_handle *
1055 add_rule_fte(struct fs_fte *fte,
1056 struct mlx5_flow_group *fg,
1057 struct mlx5_flow_destination *dest,
1058 int dest_num,
1059 bool update_action)
1060 {
1061 struct mlx5_flow_handle *handle;
1062 struct mlx5_flow_table *ft;
1063 int modify_mask = 0;
1064 int err;
1065 bool new_rule = false;
1066
1067 handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1068 &new_rule);
1069 if (IS_ERR(handle) || !new_rule)
1070 goto out;
1071
1072 if (update_action)
1073 modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1074
1075 fs_get_obj(ft, fg->node.parent);
1076 if (!(fte->status & FS_FTE_STATUS_EXISTING))
1077 err = mlx5_cmd_create_fte(get_dev(&ft->node),
1078 ft, fg->id, fte);
1079 else
1080 err = mlx5_cmd_update_fte(get_dev(&ft->node),
1081 ft, fg->id, modify_mask, fte);
1082 if (err)
1083 goto free_handle;
1084
1085 fte->status |= FS_FTE_STATUS_EXISTING;
1086
1087 out:
1088 return handle;
1089
1090 free_handle:
1091 destroy_flow_handle(fte, handle, dest, handle->num_rules);
1092 return ERR_PTR(err);
1093 }
1094
1095 /* Assumed fg is locked */
1096 static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
1097 struct list_head **prev)
1098 {
1099 struct fs_fte *fte;
1100 unsigned int start = fg->start_index;
1101
1102 if (prev)
1103 *prev = &fg->node.children;
1104
1105 /* assumed list is sorted by index */
1106 fs_for_each_fte(fte, fg) {
1107 if (fte->index != start)
1108 return start;
1109 start++;
1110 if (prev)
1111 *prev = &fte->node.list;
1112 }
1113
1114 return start;
1115 }
1116
1117 /* prev is output, prev->next = new_fte */
1118 static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
1119 u32 *match_value,
1120 struct mlx5_flow_act *flow_act,
1121 struct list_head **prev)
1122 {
1123 struct fs_fte *fte;
1124 int index;
1125
1126 index = get_free_fte_index(fg, prev);
1127 fte = alloc_fte(flow_act, match_value, index);
1128 if (IS_ERR(fte))
1129 return fte;
1130
1131 return fte;
1132 }
1133
1134 static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
1135 u8 match_criteria_enable,
1136 u32 *match_criteria)
1137 {
1138 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1139 struct list_head *prev = ft->node.children.prev;
1140 unsigned int candidate_index = 0;
1141 struct mlx5_flow_group *fg;
1142 void *match_criteria_addr;
1143 unsigned int group_size = 0;
1144 u32 *in;
1145
1146 if (!ft->autogroup.active)
1147 return ERR_PTR(-ENOENT);
1148
1149 in = mlx5_vzalloc(inlen);
1150 if (!in)
1151 return ERR_PTR(-ENOMEM);
1152
1153 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1154 /* We reserve room for flow groups in addition to the max types */
1155 group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
1156
1157 /* ft->max_fte == ft->autogroup.max_types */
1158 if (group_size == 0)
1159 group_size = 1;
1160
1161 /* sorted by start_index */
1162 fs_for_each_fg(fg, ft) {
1163 if (candidate_index + group_size > fg->start_index)
1164 candidate_index = fg->start_index + fg->max_ftes;
1165 else
1166 break;
1167 prev = &fg->node.list;
1168 }
1169
1170 if (candidate_index + group_size > ft->max_fte) {
1171 fg = ERR_PTR(-ENOSPC);
1172 goto out;
1173 }
1174
1175 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1176 match_criteria_enable);
1177 MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
1178 MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
1179 group_size - 1);
1180 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1181 in, match_criteria);
1182 memcpy(match_criteria_addr, match_criteria,
1183 MLX5_ST_SZ_BYTES(fte_match_param));
1184
1185 fg = create_flow_group_common(ft, in, prev, true);
1186 out:
1187 kvfree(in);
1188 return fg;
1189 }
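/*
 * Autogroup sizing, worked example (illustrative numbers): with
 * ft->max_fte = 4096 and required_groups = 4, each new group spans
 * 4096 / (4 + 1) = 819 entries; once the required number of groups exists,
 * group_size falls back to 1 entry per additional group.
 */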
1190
1191 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1192 struct mlx5_flow_destination *d2)
1193 {
1194 if (d1->type == d2->type) {
1195 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1196 d1->vport_num == d2->vport_num) ||
1197 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1198 d1->ft == d2->ft) ||
1199 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1200 d1->tir_num == d2->tir_num))
1201 return true;
1202 }
1203
1204 return false;
1205 }
1206
1207 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1208 struct mlx5_flow_destination *dest)
1209 {
1210 struct mlx5_flow_rule *rule;
1211
1212 list_for_each_entry(rule, &fte->node.children, node.list) {
1213 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1214 return rule;
1215 }
1216 return NULL;
1217 }
1218
1219 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1220 u32 *match_value,
1221 struct mlx5_flow_act *flow_act,
1222 struct mlx5_flow_destination *dest,
1223 int dest_num)
1224 {
1225 struct mlx5_flow_handle *handle;
1226 struct mlx5_flow_table *ft;
1227 struct list_head *prev;
1228 struct fs_fte *fte;
1229 int i;
1230
1231 nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);
1232 fs_for_each_fte(fte, fg) {
1233 nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
1234 if (compare_match_value(&fg->mask, match_value, &fte->val) &&
1235 (flow_act->action & fte->action)) {
1236 int old_action = fte->action;
1237
1238 if (fte->flow_tag != flow_act->flow_tag) {
1239 mlx5_core_warn(get_dev(&fte->node),
1240 "FTE flow tag %u already exists with different flow tag %u\n",
1241 fte->flow_tag,
1242 flow_act->flow_tag);
1243 handle = ERR_PTR(-EEXIST);
1244 goto unlock_fte;
1245 }
1246
1247 fte->action |= flow_act->action;
1248 handle = add_rule_fte(fte, fg, dest, dest_num,
1249 old_action != flow_act->action);
1250 if (IS_ERR(handle)) {
1251 fte->action = old_action;
1252 goto unlock_fte;
1253 } else {
1254 goto add_rules;
1255 }
1256 }
1257 unlock_ref_node(&fte->node);
1258 }
1259 fs_get_obj(ft, fg->node.parent);
1260 if (fg->num_ftes >= fg->max_ftes) {
1261 handle = ERR_PTR(-ENOSPC);
1262 goto unlock_fg;
1263 }
1264
1265 fte = create_fte(fg, match_value, flow_act, &prev);
1266 if (IS_ERR(fte)) {
1267 handle = (void *)fte;
1268 goto unlock_fg;
1269 }
1270 tree_init_node(&fte->node, 0, del_fte);
1271 nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
1272 handle = add_rule_fte(fte, fg, dest, dest_num, false);
1273 if (IS_ERR(handle)) {
1274 unlock_ref_node(&fte->node);
1275 kfree(fte);
1276 goto unlock_fg;
1277 }
1278
1279 fg->num_ftes++;
1280
1281 tree_add_node(&fte->node, &fg->node);
1282 list_add(&fte->node.list, prev);
1283 add_rules:
1284 for (i = 0; i < handle->num_rules; i++) {
1285 if (atomic_read(&handle->rule[i]->node.refcount) == 1)
1286 tree_add_node(&handle->rule[i]->node, &fte->node);
1287 }
1288 unlock_fte:
1289 unlock_ref_node(&fte->node);
1290 unlock_fg:
1291 unlock_ref_node(&fg->node);
1292 return handle;
1293 }
1294
1295 struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
1296 {
1297 struct mlx5_flow_rule *dst;
1298 struct fs_fte *fte;
1299
1300 fs_get_obj(fte, handle->rule[0]->node.parent);
1301
1302 fs_for_each_dst(dst, fte) {
1303 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
1304 return dst->dest_attr.counter;
1305 }
1306
1307 return NULL;
1308 }
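/*
 * Illustrative use (sketch): the counter must have been attached to the rule
 * as a MLX5_FLOW_DESTINATION_TYPE_COUNTER destination (counter allocation
 * itself is handled by the flow-counter code in fs_counters.c):
 *
 *	struct mlx5_fc *fc = mlx5_flow_rule_counter(handle);
 *
 *	if (fc)
 *		...read statistics for this FTE's counter...
 */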
1309
1310 static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
1311 {
1312 if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
1313 return !counter;
1314
1315 if (!counter)
1316 return false;
1317
1318 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1319 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1320 }
1321
1322 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1323 u32 action,
1324 struct mlx5_flow_table *ft)
1325 {
1326 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1327 return counter_is_valid(dest->counter, action);
1328
1329 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1330 return true;
1331
1332 if (!dest || ((dest->type ==
1333 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1334 (dest->ft->level <= ft->level)))
1335 return false;
1336 return true;
1337 }
1338
1339 static struct mlx5_flow_handle *
1340 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1341 struct mlx5_flow_spec *spec,
1342 struct mlx5_flow_act *flow_act,
1343 struct mlx5_flow_destination *dest,
1344 int dest_num)
1345
1346 {
1347 struct mlx5_flow_group *g;
1348 struct mlx5_flow_handle *rule;
1349 int i;
1350
1351 for (i = 0; i < dest_num; i++) {
1352 if (!dest_is_valid(&dest[i], flow_act->action, ft))
1353 return ERR_PTR(-EINVAL);
1354 }
1355
1356 nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
1357 fs_for_each_fg(g, ft)
1358 if (compare_match_criteria(g->mask.match_criteria_enable,
1359 spec->match_criteria_enable,
1360 g->mask.match_criteria,
1361 spec->match_criteria)) {
1362 rule = add_rule_fg(g, spec->match_value,
1363 flow_act, dest, dest_num);
1364 if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
1365 goto unlock;
1366 }
1367
1368 g = create_autogroup(ft, spec->match_criteria_enable,
1369 spec->match_criteria);
1370 if (IS_ERR(g)) {
1371 rule = (void *)g;
1372 goto unlock;
1373 }
1374
1375 rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num);
1376 if (IS_ERR(rule)) {
1377 /* Remove assumes refcount > 0 and autogroup creates a group
1378 * with a refcount = 0.
1379 */
1380 unlock_ref_node(&ft->node);
1381 tree_get_node(&g->node);
1382 tree_remove_node(&g->node);
1383 return rule;
1384 }
1385 unlock:
1386 unlock_ref_node(&ft->node);
1387 return rule;
1388 }
1389
1390 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1391 {
1392 return ((ft->type == FS_FT_NIC_RX) &&
1393 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1394 }
1395
1396 struct mlx5_flow_handle *
1397 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1398 struct mlx5_flow_spec *spec,
1399 struct mlx5_flow_act *flow_act,
1400 struct mlx5_flow_destination *dest,
1401 int dest_num)
1402 {
1403 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1404 struct mlx5_flow_destination gen_dest;
1405 struct mlx5_flow_table *next_ft = NULL;
1406 struct mlx5_flow_handle *handle = NULL;
1407 u32 sw_action = flow_act->action;
1408 struct fs_prio *prio;
1409
1410 fs_get_obj(prio, ft->node.parent);
1411 if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1412 if (!fwd_next_prio_supported(ft))
1413 return ERR_PTR(-EOPNOTSUPP);
1414 if (dest)
1415 return ERR_PTR(-EINVAL);
1416 mutex_lock(&root->chain_lock);
1417 next_ft = find_next_chained_ft(prio);
1418 if (next_ft) {
1419 gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1420 gen_dest.ft = next_ft;
1421 dest = &gen_dest;
1422 dest_num = 1;
1423 flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1424 } else {
1425 mutex_unlock(&root->chain_lock);
1426 return ERR_PTR(-EOPNOTSUPP);
1427 }
1428 }
1429
1430 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
1431
1432 if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1433 if (!IS_ERR_OR_NULL(handle) &&
1434 (list_empty(&handle->rule[0]->next_ft))) {
1435 mutex_lock(&next_ft->lock);
1436 list_add(&handle->rule[0]->next_ft,
1437 &next_ft->fwd_rules);
1438 mutex_unlock(&next_ft->lock);
1439 handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1440 }
1441 mutex_unlock(&root->chain_lock);
1442 }
1443 return handle;
1444 }
1445 EXPORT_SYMBOL(mlx5_add_flow_rules);
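/*
 * End-to-end sketch of adding and removing a rule (illustrative only: tirn is
 * hypothetical and error handling is trimmed):
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_spec *spec = mlx5_vzalloc(sizeof(*spec));
 *	struct mlx5_flow_handle *handle;
 *
 *	spec->match_criteria_enable =
 *		1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 outer_headers.ethertype);
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 outer_headers.ethertype, ETH_P_IPV6);
 *
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	kvfree(spec);
 *	...
 *	mlx5_del_flow_rules(handle);
 */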
1446
1447 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
1448 {
1449 int i;
1450
1451 for (i = handle->num_rules - 1; i >= 0; i--)
1452 tree_remove_node(&handle->rule[i]->node);
1453 kfree(handle);
1454 }
1455 EXPORT_SYMBOL(mlx5_del_flow_rules);
1456
1457 /* Assuming prio->node.children (flow tables) is sorted by level */
1458 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
1459 {
1460 struct fs_prio *prio;
1461
1462 fs_get_obj(prio, ft->node.parent);
1463
1464 if (!list_is_last(&ft->node.list, &prio->node.children))
1465 return list_next_entry(ft, node.list);
1466 return find_next_chained_ft(prio);
1467 }
1468
1469 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
1470 {
1471 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1472 struct mlx5_flow_table *new_root_ft = NULL;
1473
1474 if (root->root_ft != ft)
1475 return 0;
1476
1477 new_root_ft = find_next_ft(ft);
1478 if (new_root_ft) {
1479 int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft);
1480
1481 if (err) {
1482 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
1483 ft->id);
1484 return err;
1485 }
1486 }
1487 root->root_ft = new_root_ft;
1488 return 0;
1489 }
1490
1491 /* Connect flow table from previous priority to
1492 * the next flow table.
1493 */
1494 static int disconnect_flow_table(struct mlx5_flow_table *ft)
1495 {
1496 struct mlx5_core_dev *dev = get_dev(&ft->node);
1497 struct mlx5_flow_table *next_ft;
1498 struct fs_prio *prio;
1499 int err = 0;
1500
1501 err = update_root_ft_destroy(ft);
1502 if (err)
1503 return err;
1504
1505 fs_get_obj(prio, ft->node.parent);
1506 if (!(list_first_entry(&prio->node.children,
1507 struct mlx5_flow_table,
1508 node.list) == ft))
1509 return 0;
1510
1511 next_ft = find_next_chained_ft(prio);
1512 err = connect_fwd_rules(dev, next_ft, ft);
1513 if (err)
1514 return err;
1515
1516 err = connect_prev_fts(dev, next_ft, prio);
1517 if (err)
1518 mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
1519 ft->id);
1520 return err;
1521 }
1522
1523 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
1524 {
1525 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1526 int err = 0;
1527
1528 mutex_lock(&root->chain_lock);
1529 err = disconnect_flow_table(ft);
1530 if (err) {
1531 mutex_unlock(&root->chain_lock);
1532 return err;
1533 }
1534 if (tree_remove_node(&ft->node))
1535 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
1536 ft->id);
1537 mutex_unlock(&root->chain_lock);
1538
1539 return err;
1540 }
1541 EXPORT_SYMBOL(mlx5_destroy_flow_table);
1542
1543 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
1544 {
1545 if (tree_remove_node(&fg->node))
1546 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
1547 fg->id);
1548 }
1549
1550 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
1551 enum mlx5_flow_namespace_type type)
1552 {
1553 struct mlx5_flow_steering *steering = dev->priv.steering;
1554 struct mlx5_flow_root_namespace *root_ns;
1555 int prio;
1556 struct fs_prio *fs_prio;
1557 struct mlx5_flow_namespace *ns;
1558
1559 if (!steering)
1560 return NULL;
1561
1562 switch (type) {
1563 case MLX5_FLOW_NAMESPACE_BYPASS:
1564 case MLX5_FLOW_NAMESPACE_LAG:
1565 case MLX5_FLOW_NAMESPACE_OFFLOADS:
1566 case MLX5_FLOW_NAMESPACE_ETHTOOL:
1567 case MLX5_FLOW_NAMESPACE_KERNEL:
1568 case MLX5_FLOW_NAMESPACE_LEFTOVERS:
1569 case MLX5_FLOW_NAMESPACE_ANCHOR:
1570 prio = type;
1571 break;
1572 case MLX5_FLOW_NAMESPACE_FDB:
1573 if (steering->fdb_root_ns)
1574 return &steering->fdb_root_ns->ns;
1575 else
1576 return NULL;
1577 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
1578 if (steering->esw_egress_root_ns)
1579 return &steering->esw_egress_root_ns->ns;
1580 else
1581 return NULL;
1582 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
1583 if (steering->esw_ingress_root_ns)
1584 return &steering->esw_ingress_root_ns->ns;
1585 else
1586 return NULL;
1587 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
1588 if (steering->sniffer_rx_root_ns)
1589 return &steering->sniffer_rx_root_ns->ns;
1590 else
1591 return NULL;
1592 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
1593 if (steering->sniffer_tx_root_ns)
1594 return &steering->sniffer_tx_root_ns->ns;
1595 else
1596 return NULL;
1597 default:
1598 return NULL;
1599 }
1600
1601 root_ns = steering->root_ns;
1602 if (!root_ns)
1603 return NULL;
1604
1605 fs_prio = find_prio(&root_ns->ns, prio);
1606 if (!fs_prio)
1607 return NULL;
1608
1609 ns = list_first_entry(&fs_prio->node.children,
1610 typeof(*ns),
1611 node.list);
1612
1613 return ns;
1614 }
1615 EXPORT_SYMBOL(mlx5_get_flow_namespace);
1616
1617 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
1618 unsigned int prio, int num_levels)
1619 {
1620 struct fs_prio *fs_prio;
1621
1622 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
1623 if (!fs_prio)
1624 return ERR_PTR(-ENOMEM);
1625
1626 fs_prio->node.type = FS_TYPE_PRIO;
1627 tree_init_node(&fs_prio->node, 1, NULL);
1628 tree_add_node(&fs_prio->node, &ns->node);
1629 fs_prio->num_levels = num_levels;
1630 fs_prio->prio = prio;
1631 list_add_tail(&fs_prio->node.list, &ns->node.children);
1632
1633 return fs_prio;
1634 }
1635
1636 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
1637 *ns)
1638 {
1639 ns->node.type = FS_TYPE_NAMESPACE;
1640
1641 return ns;
1642 }
1643
1644 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
1645 {
1646 struct mlx5_flow_namespace *ns;
1647
1648 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1649 if (!ns)
1650 return ERR_PTR(-ENOMEM);
1651
1652 fs_init_namespace(ns);
1653 tree_init_node(&ns->node, 1, NULL);
1654 tree_add_node(&ns->node, &prio->node);
1655 list_add_tail(&ns->node.list, &prio->node.children);
1656
1657 return ns;
1658 }
1659
1660 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
1661 struct init_tree_node *prio_metadata)
1662 {
1663 struct fs_prio *fs_prio;
1664 int i;
1665
1666 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
1667 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
1668 if (IS_ERR(fs_prio))
1669 return PTR_ERR(fs_prio);
1670 }
1671 return 0;
1672 }
1673
1674 #define FLOW_TABLE_BIT_SZ 1
1675 #define GET_FLOW_TABLE_CAP(dev, offset) \
1676 ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
1677 offset / 32)) >> \
1678 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
1679 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
1680 {
1681 int i;
1682
1683 for (i = 0; i < caps->arr_sz; i++) {
1684 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
1685 return false;
1686 }
1687 return true;
1688 }
1689
1690 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
1691 struct init_tree_node *init_node,
1692 struct fs_node *fs_parent_node,
1693 struct init_tree_node *init_parent_node,
1694 int prio)
1695 {
1696 int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
1697 flow_table_properties_nic_receive.
1698 max_ft_level);
1699 struct mlx5_flow_namespace *fs_ns;
1700 struct fs_prio *fs_prio;
1701 struct fs_node *base;
1702 int i;
1703 int err;
1704
1705 if (init_node->type == FS_TYPE_PRIO) {
1706 if ((init_node->min_ft_level > max_ft_level) ||
1707 !has_required_caps(steering->dev, &init_node->caps))
1708 return 0;
1709
1710 fs_get_obj(fs_ns, fs_parent_node);
1711 if (init_node->num_leaf_prios)
1712 return create_leaf_prios(fs_ns, prio, init_node);
1713 fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
1714 if (IS_ERR(fs_prio))
1715 return PTR_ERR(fs_prio);
1716 base = &fs_prio->node;
1717 } else if (init_node->type == FS_TYPE_NAMESPACE) {
1718 fs_get_obj(fs_prio, fs_parent_node);
1719 fs_ns = fs_create_namespace(fs_prio);
1720 if (IS_ERR(fs_ns))
1721 return PTR_ERR(fs_ns);
1722 base = &fs_ns->node;
1723 } else {
1724 return -EINVAL;
1725 }
1726 prio = 0;
1727 for (i = 0; i < init_node->ar_size; i++) {
1728 err = init_root_tree_recursive(steering, &init_node->children[i],
1729 base, init_node, prio);
1730 if (err)
1731 return err;
1732 if (init_node->children[i].type == FS_TYPE_PRIO &&
1733 init_node->children[i].num_leaf_prios) {
1734 prio += init_node->children[i].num_leaf_prios;
1735 }
1736 }
1737
1738 return 0;
1739 }
1740
1741 static int init_root_tree(struct mlx5_flow_steering *steering,
1742 struct init_tree_node *init_node,
1743 struct fs_node *fs_parent_node)
1744 {
1745 int i;
1746 struct mlx5_flow_namespace *fs_ns;
1747 int err;
1748
1749 fs_get_obj(fs_ns, fs_parent_node);
1750 for (i = 0; i < init_node->ar_size; i++) {
1751 err = init_root_tree_recursive(steering, &init_node->children[i],
1752 &fs_ns->node,
1753 init_node, i);
1754 if (err)
1755 return err;
1756 }
1757 return 0;
1758 }
1759
1760 static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
1761 enum fs_flow_table_type
1762 table_type)
1763 {
1764 struct mlx5_flow_root_namespace *root_ns;
1765 struct mlx5_flow_namespace *ns;
1766
1767 /* Create the root namespace */
1768 root_ns = mlx5_vzalloc(sizeof(*root_ns));
1769 if (!root_ns)
1770 return NULL;
1771
1772 root_ns->dev = steering->dev;
1773 root_ns->table_type = table_type;
1774
1775 ns = &root_ns->ns;
1776 fs_init_namespace(ns);
1777 mutex_init(&root_ns->chain_lock);
1778 tree_init_node(&ns->node, 1, NULL);
1779 tree_add_node(&ns->node, NULL);
1780
1781 return root_ns;
1782 }
1783
1784 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
1785
1786 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
1787 {
1788 struct fs_prio *prio;
1789
1790 fs_for_each_prio(prio, ns) {
1791 /* This updates prio start_level and num_levels */
1792 set_prio_attrs_in_prio(prio, acc_level);
1793 acc_level += prio->num_levels;
1794 }
1795 return acc_level;
1796 }
1797
1798 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
1799 {
1800 struct mlx5_flow_namespace *ns;
1801 int acc_level_ns = acc_level;
1802
1803 prio->start_level = acc_level;
1804 fs_for_each_ns(ns, prio)
1805 /* This updates start_level and num_levels of ns's priority descendants */
1806 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
1807 if (!prio->num_levels)
1808 prio->num_levels = acc_level_ns - prio->start_level;
1809 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
1810 }
1811
1812 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
1813 {
1814 struct mlx5_flow_namespace *ns = &root_ns->ns;
1815 struct fs_prio *prio;
1816 int start_level = 0;
1817
1818 fs_for_each_prio(prio, ns) {
1819 set_prio_attrs_in_prio(prio, start_level);
1820 start_level += prio->num_levels;
1821 }
1822 }
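/*
 * Worked example of the level bookkeeping: set_prio_attrs() hands out
 * start_level cumulatively in tree order, so the bypass priorities occupy the
 * lowest levels, lag and offloads follow, ethtool takes the next
 * ETHTOOL_NUM_PRIOS levels, and so on. The *_MIN_LEVEL macros at the top of
 * the file are a separate constraint: init_root_tree_recursive() compares
 * them against the device's max_ft_level capability to decide whether a
 * priority may be created at all.
 */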
1823
1824 #define ANCHOR_PRIO 0
1825 #define ANCHOR_SIZE 1
1826 #define ANCHOR_LEVEL 0
1827 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
1828 {
1829 struct mlx5_flow_namespace *ns = NULL;
1830 struct mlx5_flow_table *ft;
1831
1832 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
1833 if (WARN_ON(!ns))
1834 return -EINVAL;
1835 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
1836 if (IS_ERR(ft)) {
1837 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
1838 return PTR_ERR(ft);
1839 }
1840 return 0;
1841 }
1842
1843 static int init_root_ns(struct mlx5_flow_steering *steering)
1844 {
1845
1846 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
1847 if (!steering->root_ns)
1848 goto cleanup;
1849
1850 if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
1851 goto cleanup;
1852
1853 set_prio_attrs(steering->root_ns);
1854
1855 if (create_anchor_flow_table(steering))
1856 goto cleanup;
1857
1858 return 0;
1859
1860 cleanup:
1861 mlx5_cleanup_fs(steering->dev);
1862 return -ENOMEM;
1863 }
1864
1865 static void clean_tree(struct fs_node *node)
1866 {
1867 if (node) {
1868 struct fs_node *iter;
1869 struct fs_node *temp;
1870
1871 list_for_each_entry_safe(iter, temp, &node->children, list)
1872 clean_tree(iter);
1873 tree_remove_node(node);
1874 }
1875 }
1876
1877 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
1878 {
1879 if (!root_ns)
1880 return;
1881
1882 clean_tree(&root_ns->ns.node);
1883 }
1884
1885 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
1886 {
1887 struct mlx5_flow_steering *steering = dev->priv.steering;
1888
1889 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1890 return;
1891
1892 cleanup_root_ns(steering->root_ns);
1893 cleanup_root_ns(steering->esw_egress_root_ns);
1894 cleanup_root_ns(steering->esw_ingress_root_ns);
1895 cleanup_root_ns(steering->fdb_root_ns);
1896 cleanup_root_ns(steering->sniffer_rx_root_ns);
1897 cleanup_root_ns(steering->sniffer_tx_root_ns);
1898 mlx5_cleanup_fc_stats(dev);
1899 kfree(steering);
1900 }
1901
1902 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
1903 {
1904 struct fs_prio *prio;
1905
1906 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
1907 if (!steering->sniffer_tx_root_ns)
1908 return -ENOMEM;
1909
1910 /* Create single prio */
1911 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
1912 if (IS_ERR(prio)) {
1913 cleanup_root_ns(steering->sniffer_tx_root_ns);
1914 return PTR_ERR(prio);
1915 }
1916 return 0;
1917 }
1918
1919 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
1920 {
1921 struct fs_prio *prio;
1922
1923 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
1924 if (!steering->sniffer_rx_root_ns)
1925 return -ENOMEM;
1926
1927 /* Create single prio */
1928 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
1929 if (IS_ERR(prio)) {
1930 cleanup_root_ns(steering->sniffer_rx_root_ns);
1931 return PTR_ERR(prio);
1932 }
1933 return 0;
1934 }
1935
1936 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
1937 {
1938 struct fs_prio *prio;
1939
1940 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
1941 if (!steering->fdb_root_ns)
1942 return -ENOMEM;
1943
1944 prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
1945 if (IS_ERR(prio))
1946 goto out_err;
1947
1948 prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
1949 if (IS_ERR(prio))
1950 goto out_err;
1951
1952 set_prio_attrs(steering->fdb_root_ns);
1953 return 0;
1954
1955 out_err:
1956 cleanup_root_ns(steering->fdb_root_ns);
1957 steering->fdb_root_ns = NULL;
1958 return PTR_ERR(prio);
1959 }
1960
1961 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
1962 {
1963 struct fs_prio *prio;
1964
1965 steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
1966 if (!steering->esw_egress_root_ns)
1967 return -ENOMEM;
1968
1969 /* create 1 prio */
1970 prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
1971 MLX5_TOTAL_VPORTS(steering->dev));
1972 return PTR_ERR_OR_ZERO(prio);
1973 }
1974
1975 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
1976 {
1977 struct fs_prio *prio;
1978
1979 steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
1980 if (!steering->esw_ingress_root_ns)
1981 return -ENOMEM;
1982
1983 /* create 1 prio */
1984 prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
1985 MLX5_TOTAL_VPORTS(steering->dev));
1986 return PTR_ERR_OR_ZERO(prio);
1987 }
1988
1989 int mlx5_init_fs(struct mlx5_core_dev *dev)
1990 {
1991 struct mlx5_flow_steering *steering;
1992 int err = 0;
1993
1994 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1995 return 0;
1996
1997 err = mlx5_init_fc_stats(dev);
1998 if (err)
1999 return err;
2000
2001 steering = kzalloc(sizeof(*steering), GFP_KERNEL);
2002 if (!steering)
2003 return -ENOMEM;
2004 steering->dev = dev;
2005 dev->priv.steering = steering;
2006
2007 if (MLX5_CAP_GEN(dev, nic_flow_table) &&
2008 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
2009 err = init_root_ns(steering);
2010 if (err)
2011 goto err;
2012 }
2013
2014 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
2015 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
2016 err = init_fdb_root_ns(steering);
2017 if (err)
2018 goto err;
2019 }
2020 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
2021 err = init_egress_acl_root_ns(steering);
2022 if (err)
2023 goto err;
2024 }
2025 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
2026 err = init_ingress_acl_root_ns(steering);
2027 if (err)
2028 goto err;
2029 }
2030 }
2031
2032 if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
2033 err = init_sniffer_rx_root_ns(steering);
2034 if (err)
2035 goto err;
2036 }
2037
2038 if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
2039 err = init_sniffer_tx_root_ns(steering);
2040 if (err)
2041 goto err;
2042 }
2043
2044 return 0;
2045 err:
2046 mlx5_cleanup_fs(dev);
2047 return err;
2048 }