]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
Merge branch 'parisc-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller...
[mirror_ubuntu-hirsute-kernel.git] / drivers / net / ethernet / mellanox / mlxsw / core_acl_flex_actions.c
1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/types.h>
37 #include <linux/slab.h>
38 #include <linux/errno.h>
39 #include <linux/rhashtable.h>
40 #include <linux/list.h>
41
42 #include "item.h"
43 #include "trap.h"
44 #include "core_acl_flex_actions.h"
45
/* Type of the terminating record placed at the end of every action set. */
enum mlxsw_afa_set_type {
	MLXSW_AFA_SET_TYPE_NEXT,
	MLXSW_AFA_SET_TYPE_GOTO,
};

/* afa_set_type
 * Type of the record at the end of the action set.
 */
MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);

/* afa_set_next_action_set_ptr
 * A pointer to the next action set in the KVD Centralized database.
 */
MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);

/* afa_set_goto_g
 * group - When set, the binding is of an ACL group. When cleared,
 * the binding is of an ACL.
 * Must be set to 1 for Spectrum.
 */
MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);

enum mlxsw_afa_set_goto_binding_cmd {
	/* continue to the next binding point */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
	/* jump to the next binding point, no return */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
	/* terminate the acl binding */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
};

/* afa_set_goto_binding_cmd */
MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);

/* afa_set_goto_next_binding
 * ACL/ACL group identifier. If the g bit is set, this field should hold
 * the acl_group_id, else it should hold the acl_id.
 */
MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);

/* afa_all_action_type
 * Action Type. Common header field present in every single action record.
 */
MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);
90
/* Per-ASIC flexible actions instance. Owns the deduplication hash tables
 * for shared action sets and forwarding (PBS) entries, and holds the
 * driver callbacks used to talk to KVD linear memory.
 */
struct mlxsw_afa {
	unsigned int max_acts_per_set;	/* action slots per hardware set */
	const struct mlxsw_afa_ops *ops;	/* driver-provided KVDL/mirror/counter ops */
	void *ops_priv;			/* opaque cookie passed back to ops */
	struct rhashtable set_ht;	/* dedup table of shared action sets */
	struct rhashtable fwd_entry_ht;	/* dedup table of PBS forward entries */
};

/* Size in bytes of one encoded action set record. */
#define MLXSW_AFA_SET_LEN 0xA8

struct mlxsw_afa_set_ht_key {
	char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
	bool is_first;	/* first set of a block is allocated differently */
};
105
/* Set structure holds one action set record. It contains up to three
 * actions (depends on size of particular actions). The set is either
 * put directly to a rule, or it is stored in KVD linear area.
 * To prevent duplicate entries in KVD linear area, a hashtable is
 * used to track sets that were previously inserted and may be shared.
 */

struct mlxsw_afa_set {
	struct rhash_head ht_node;
	struct mlxsw_afa_set_ht_key ht_key;	/* encoded actions; doubles as dedup key */
	u32 kvdl_index;		/* valid only after the set was shared */
	bool shared; /* Inserted in hashtable (doesn't mean that
		      * kvdl_index is valid).
		      */
	unsigned int ref_count;
	struct mlxsw_afa_set *next; /* Pointer to the next set. */
	struct mlxsw_afa_set *prev; /* Pointer to the previous set,
				     * note that set may have multiple
				     * sets from multiple blocks
				     * pointing at it. This is only
				     * usable until commit.
				     */
};

static const struct rhashtable_params mlxsw_afa_set_ht_params = {
	.key_len = sizeof(struct mlxsw_afa_set_ht_key),
	.key_offset = offsetof(struct mlxsw_afa_set, ht_key),
	.head_offset = offsetof(struct mlxsw_afa_set, ht_node),
	.automatic_shrinking = true,
};
136
/* Hash key for forwarding (PBS) entries: one entry per local port. */
struct mlxsw_afa_fwd_entry_ht_key {
	u8 local_port;
};

/* Refcounted PBS record stored in KVD linear memory, shared between all
 * blocks that forward to the same local port.
 */
struct mlxsw_afa_fwd_entry {
	struct rhash_head ht_node;
	struct mlxsw_afa_fwd_entry_ht_key ht_key;
	u32 kvdl_index;		/* KVD linear index of the PBS record */
	unsigned int ref_count;
};

static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
	.key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
	.key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
	.head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
	.automatic_shrinking = true,
};
154
155 struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
156 const struct mlxsw_afa_ops *ops,
157 void *ops_priv)
158 {
159 struct mlxsw_afa *mlxsw_afa;
160 int err;
161
162 mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
163 if (!mlxsw_afa)
164 return ERR_PTR(-ENOMEM);
165 err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
166 if (err)
167 goto err_set_rhashtable_init;
168 err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
169 &mlxsw_afa_fwd_entry_ht_params);
170 if (err)
171 goto err_fwd_entry_rhashtable_init;
172 mlxsw_afa->max_acts_per_set = max_acts_per_set;
173 mlxsw_afa->ops = ops;
174 mlxsw_afa->ops_priv = ops_priv;
175 return mlxsw_afa;
176
177 err_fwd_entry_rhashtable_init:
178 rhashtable_destroy(&mlxsw_afa->set_ht);
179 err_set_rhashtable_init:
180 kfree(mlxsw_afa);
181 return ERR_PTR(err);
182 }
183 EXPORT_SYMBOL(mlxsw_afa_create);
184
/* Tear down an instance created by mlxsw_afa_create(). Caller must have
 * already destroyed all blocks so both hash tables are empty.
 */
void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
{
	rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
	rhashtable_destroy(&mlxsw_afa->set_ht);
	kfree(mlxsw_afa);
}
EXPORT_SYMBOL(mlxsw_afa_destroy);
192
193 static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
194 enum mlxsw_afa_set_goto_binding_cmd cmd,
195 u16 group_id)
196 {
197 char *actions = set->ht_key.enc_actions;
198
199 mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
200 mlxsw_afa_set_goto_g_set(actions, true);
201 mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
202 mlxsw_afa_set_goto_next_binding_set(actions, group_id);
203 }
204
205 static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
206 u32 next_set_kvdl_index)
207 {
208 char *actions = set->ht_key.enc_actions;
209
210 mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
211 mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
212 }
213
214 static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
215 {
216 struct mlxsw_afa_set *set;
217
218 set = kzalloc(sizeof(*set), GFP_KERNEL);
219 if (!set)
220 return NULL;
221 /* Need to initialize the set to pass by default */
222 mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
223 set->ht_key.is_first = is_first;
224 set->ref_count = 1;
225 return set;
226 }
227
/* Free an action set. Caller must have unshared it first if needed. */
static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
{
	kfree(set);
}
232
/* Publish a set for sharing: insert it into the dedup hash table and
 * write it into KVD linear memory. On success the set's kvdl_index
 * becomes valid and prev is cleared (the chain link is meaningless for
 * a shared set). Rolls back the hash insertion if the KVDL write fails.
 */
static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
			       struct mlxsw_afa_set *set)
{
	int err;

	err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
				     mlxsw_afa_set_ht_params);
	if (err)
		return err;
	err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
					   &set->kvdl_index,
					   set->ht_key.enc_actions,
					   set->ht_key.is_first);
	if (err)
		goto err_kvdl_set_add;
	set->shared = true;
	set->prev = NULL;
	return 0;

err_kvdl_set_add:
	rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
			       mlxsw_afa_set_ht_params);
	return err;
}
257
/* Reverse of mlxsw_afa_set_share(): remove the set from KVD linear
 * memory and from the dedup hash table.
 */
static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
				  struct mlxsw_afa_set *set)
{
	mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
				     set->kvdl_index,
				     set->ht_key.is_first);
	rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
			       mlxsw_afa_set_ht_params);
	set->shared = false;
}
268
269 static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
270 struct mlxsw_afa_set *set)
271 {
272 if (--set->ref_count)
273 return;
274 if (set->shared)
275 mlxsw_afa_set_unshare(mlxsw_afa, set);
276 mlxsw_afa_set_destroy(set);
277 }
278
279 static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
280 struct mlxsw_afa_set *orig_set)
281 {
282 struct mlxsw_afa_set *set;
283 int err;
284
285 /* There is a hashtable of sets maintained. If a set with the exact
286 * same encoding exists, we reuse it. Otherwise, the current set
287 * is shared by making it available to others using the hash table.
288 */
289 set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
290 mlxsw_afa_set_ht_params);
291 if (set) {
292 set->ref_count++;
293 mlxsw_afa_set_put(mlxsw_afa, orig_set);
294 } else {
295 set = orig_set;
296 err = mlxsw_afa_set_share(mlxsw_afa, set);
297 if (err)
298 return ERR_PTR(err);
299 }
300 return set;
301 }
302
/* Block structure holds a list of action sets. One action block
 * represents one chain of actions executed upon match of a rule.
 */

struct mlxsw_afa_block {
	struct mlxsw_afa *afa;
	bool finished;	/* no more actions may be appended once set */
	struct mlxsw_afa_set *first_set;
	struct mlxsw_afa_set *cur_set;	/* set currently being filled */
	unsigned int cur_act_index; /* In current set. */
	struct list_head resource_list; /* List of resources held by actions
					 * in this block.
					 */
};

/* A resource (counter, mirror agent, fwd entry reference, ...) owned by
 * a block; the destructor releases it when the block is destroyed.
 */
struct mlxsw_afa_resource {
	struct list_head list;
	void (*destructor)(struct mlxsw_afa_block *block,
			   struct mlxsw_afa_resource *resource);
};
323
/* Register a resource with the block so it is released on block destroy. */
static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
				   struct mlxsw_afa_resource *resource)
{
	list_add(&resource->list, &block->resource_list);
}
329
/* Release every resource still registered with the block. The entry is
 * unlinked before its destructor runs because the destructor frees the
 * containing object.
 */
static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_resource *resource, *tmp;

	list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
		list_del(&resource->list);
		resource->destructor(block, resource);
	}
}
339
340 struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
341 {
342 struct mlxsw_afa_block *block;
343
344 block = kzalloc(sizeof(*block), GFP_KERNEL);
345 if (!block)
346 return NULL;
347 INIT_LIST_HEAD(&block->resource_list);
348 block->afa = mlxsw_afa;
349
350 /* At least one action set is always present, so just create it here */
351 block->first_set = mlxsw_afa_set_create(true);
352 if (!block->first_set)
353 goto err_first_set_create;
354 block->cur_set = block->first_set;
355 return block;
356
357 err_first_set_create:
358 kfree(block);
359 return NULL;
360 }
361 EXPORT_SYMBOL(mlxsw_afa_block_create);
362
363 void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
364 {
365 struct mlxsw_afa_set *set = block->first_set;
366 struct mlxsw_afa_set *next_set;
367
368 do {
369 next_set = set->next;
370 mlxsw_afa_set_put(block->afa, set);
371 set = next_set;
372 } while (set);
373 mlxsw_afa_resources_destroy(block);
374 kfree(block);
375 }
376 EXPORT_SYMBOL(mlxsw_afa_block_destroy);
377
/* Finalize a block: walk the set chain from the last set backwards,
 * deduplicating each set via the hash table and linking each earlier
 * set's NEXT record to the (possibly shared) successor's KVDL index.
 * The backwards order is required because a set's encoding - and thus
 * its dedup identity - depends on its successor's kvdl_index.
 * After commit, first_set may point at a shared set; no further actions
 * may be appended.
 */
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_set *set = block->cur_set;
	struct mlxsw_afa_set *prev_set;

	block->cur_set = NULL;
	block->finished = true;

	/* Go over all linked sets starting from last
	 * and try to find existing set in the hash table.
	 * In case it is not there, assign a KVD linear index
	 * and insert it.
	 */
	do {
		prev_set = set->prev;
		set = mlxsw_afa_set_get(block->afa, set);
		if (IS_ERR(set))
			/* No rollback is needed since the chain is
			 * in consistent state and mlxsw_afa_block_destroy
			 * will take care of putting it away.
			 */
			return PTR_ERR(set);
		if (prev_set) {
			prev_set->next = set;
			mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
			set = prev_set;
		}
	} while (prev_set);

	block->first_set = set;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_commit);
411
/* Return the encoded action buffer of the block's first set, for drivers
 * that write it directly into a rule.
 */
char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
{
	return block->first_set->ht_key.enc_actions;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);

/* Return the KVD linear index of the block's first set. Only meaningful
 * after a successful mlxsw_afa_block_commit().
 */
u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
{
	return block->first_set->kvdl_index;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
423
/* Terminate the block with a "continue to next binding point" record.
 * Returns -EINVAL if the block was already finished.
 */
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
	if (block->finished)
		return -EINVAL;
	mlxsw_afa_set_goto_set(block->cur_set,
			       MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
	block->finished = true;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_continue);

/* Terminate the block with a jump to the given ACL group (no return).
 * Returns -EINVAL if the block was already finished.
 */
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
{
	if (block->finished)
		return -EINVAL;
	mlxsw_afa_set_goto_set(block->cur_set,
			       MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
	block->finished = true;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);
445
/* Allocate a PBS forward entry for local_port, publish it in the hash
 * table and program it into KVD linear memory. Returns ERR_PTR() on
 * failure, rolling back in reverse order.
 */
static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
	struct mlxsw_afa_fwd_entry *fwd_entry;
	int err;

	fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
	if (!fwd_entry)
		return ERR_PTR(-ENOMEM);
	fwd_entry->ht_key.local_port = local_port;
	fwd_entry->ref_count = 1;

	err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
				     &fwd_entry->ht_node,
				     mlxsw_afa_fwd_entry_ht_params);
	if (err)
		goto err_rhashtable_insert;

	err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
						 &fwd_entry->kvdl_index,
						 local_port);
	if (err)
		goto err_kvdl_fwd_entry_add;
	return fwd_entry;

err_kvdl_fwd_entry_add:
	rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
			       mlxsw_afa_fwd_entry_ht_params);
err_rhashtable_insert:
	kfree(fwd_entry);
	return ERR_PTR(err);
}
478
/* Reverse of mlxsw_afa_fwd_entry_create(): remove the PBS record from
 * KVDL, drop it from the hash table and free it.
 */
static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
					struct mlxsw_afa_fwd_entry *fwd_entry)
{
	mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
					   fwd_entry->kvdl_index);
	rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
			       mlxsw_afa_fwd_entry_ht_params);
	kfree(fwd_entry);
}
488
489 static struct mlxsw_afa_fwd_entry *
490 mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
491 {
492 struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
493 struct mlxsw_afa_fwd_entry *fwd_entry;
494
495 ht_key.local_port = local_port;
496 fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
497 mlxsw_afa_fwd_entry_ht_params);
498 if (fwd_entry) {
499 fwd_entry->ref_count++;
500 return fwd_entry;
501 }
502 return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
503 }
504
505 static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
506 struct mlxsw_afa_fwd_entry *fwd_entry)
507 {
508 if (--fwd_entry->ref_count)
509 return;
510 mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
511 }
512
/* Block-owned reference to a shared forward entry. */
struct mlxsw_afa_fwd_entry_ref {
	struct mlxsw_afa_resource resource;	/* must be first for container_of */
	struct mlxsw_afa_fwd_entry *fwd_entry;
};
517
/* Release the reference on the shared forward entry and free the ref.
 * NOTE(review): does not unlink resource.list - callers invoking this
 * outside of mlxsw_afa_resources_destroy() must unlink it themselves.
 */
static void
mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
				struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
{
	mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
	kfree(fwd_entry_ref);
}
525
526 static void
527 mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
528 struct mlxsw_afa_resource *resource)
529 {
530 struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
531
532 fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
533 resource);
534 mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
535 }
536
/* Take a reference on the shared forward entry for local_port and
 * register it as a block resource so it is released on block destroy.
 * Returns ERR_PTR() on failure.
 */
static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
	struct mlxsw_afa_fwd_entry *fwd_entry;
	int err;

	fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
	if (!fwd_entry_ref)
		return ERR_PTR(-ENOMEM);
	fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
	if (IS_ERR(fwd_entry)) {
		err = PTR_ERR(fwd_entry);
		goto err_fwd_entry_get;
	}
	fwd_entry_ref->fwd_entry = fwd_entry;
	fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
	mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
	return fwd_entry_ref;

err_fwd_entry_get:
	kfree(fwd_entry_ref);
	return ERR_PTR(err);
}
561
/* Block-owned flow counter allocation. */
struct mlxsw_afa_counter {
	struct mlxsw_afa_resource resource;	/* must be first for container_of */
	u32 counter_index;	/* index handed out by the driver's counter pool */
};
566
/* Return the counter index to the driver pool and free the wrapper.
 * NOTE(review): does not unlink resource.list - callers invoking this
 * outside of mlxsw_afa_resources_destroy() must unlink it themselves.
 */
static void
mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
			  struct mlxsw_afa_counter *counter)
{
	block->afa->ops->counter_index_put(block->afa->ops_priv,
					   counter->counter_index);
	kfree(counter);
}
575
576 static void
577 mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
578 struct mlxsw_afa_resource *resource)
579 {
580 struct mlxsw_afa_counter *counter;
581
582 counter = container_of(resource, struct mlxsw_afa_counter, resource);
583 mlxsw_afa_counter_destroy(block, counter);
584 }
585
/* Allocate a flow counter from the driver pool and register it as a
 * block resource. Returns ERR_PTR() on failure.
 */
static struct mlxsw_afa_counter *
mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_counter *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = block->afa->ops->counter_index_get(block->afa->ops_priv,
						 &counter->counter_index);
	if (err)
		goto err_counter_index_get;
	counter->resource.destructor = mlxsw_afa_counter_destructor;
	mlxsw_afa_resource_add(block, &counter->resource);
	return counter;

err_counter_index_get:
	kfree(counter);
	return ERR_PTR(err);
}
608
/* Stride in bytes between action slots inside a set's encoded buffer. */
#define MLXSW_AFA_ONE_ACTION_LEN 32
/* Each action record starts with a 4-byte header (action_type). */
#define MLXSW_AFA_PAYLOAD_OFFSET 4

/* Reserve action_size slots in the current set for a new action, opening
 * a fresh chained set when the current one is full. Writes the action
 * type header and returns a pointer to the action's payload, or NULL on
 * allocation failure / on an already-finished block.
 */
static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
					   u8 action_code, u8 action_size)
{
	char *oneact;
	char *actions;

	if (WARN_ON(block->finished))
		return NULL;
	if (block->cur_act_index + action_size >
	    block->afa->max_acts_per_set) {
		struct mlxsw_afa_set *set;

		/* The appended action won't fit into the current action set,
		 * so create a new set.
		 */
		set = mlxsw_afa_set_create(false);
		if (!set)
			return NULL;
		set->prev = block->cur_set;
		block->cur_act_index = 0;
		block->cur_set->next = set;
		block->cur_set = set;
	}

	actions = block->cur_set->ht_key.enc_actions;
	oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
	block->cur_act_index += action_size;
	mlxsw_afa_all_action_type_set(oneact, action_code);
	return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
}
642
/* VLAN Action
 * -----------
 * VLAN action is used for manipulating VLANs. It can be used to implement QinQ,
 * VLAN translation, change of PCP bits of the VLAN tag, push, pop as swap VLANs
 * and more.
 */

#define MLXSW_AFA_VLAN_CODE 0x02
#define MLXSW_AFA_VLAN_SIZE 1

enum mlxsw_afa_vlan_vlan_tag_cmd {
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
};

enum mlxsw_afa_vlan_cmd {
	MLXSW_AFA_VLAN_CMD_NOP,
	MLXSW_AFA_VLAN_CMD_SET_OUTER,
	MLXSW_AFA_VLAN_CMD_SET_INNER,
	MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
	MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
	MLXSW_AFA_VLAN_CMD_SWAP,
};

/* afa_vlan_vlan_tag_cmd
 * Tag command: push, pop, nop VLAN header.
 */
MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);

/* afa_vlan_vid_cmd */
MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);

/* afa_vlan_vid */
MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);

/* afa_vlan_ethertype_cmd */
MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);

/* afa_vlan_ethertype
 * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
 */
MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);

/* afa_vlan_pcp_cmd */
MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);

/* afa_vlan_pcp */
MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);
692
/* Encode all fields of a VLAN action payload. */
static inline void
mlxsw_afa_vlan_pack(char *payload,
		    enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
		    enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
		    enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
		    enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
{
	mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
	mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
	mlxsw_afa_vlan_vid_set(payload, vid);
	mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
	mlxsw_afa_vlan_pcp_set(payload, pcp);
	mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
	mlxsw_afa_vlan_ethertype_set(payload, ethertype);
}
708
709 int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
710 u16 vid, u8 pcp, u8 et)
711 {
712 char *act = mlxsw_afa_block_append_action(block,
713 MLXSW_AFA_VLAN_CODE,
714 MLXSW_AFA_VLAN_SIZE);
715
716 if (!act)
717 return -ENOBUFS;
718 mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
719 MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
720 MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
721 MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
722 return 0;
723 }
724 EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
725
/* Trap / Discard Action
 * ---------------------
 * The Trap / Discard action enables trapping / mirroring packets to the CPU
 * as well as discarding packets.
 * The ACL Trap / Discard separates the forward/discard control from CPU
 * trap control. In addition, the Trap / Discard action enables activating
 * SPAN (port mirroring).
 */

#define MLXSW_AFA_TRAPDISC_CODE 0x03
#define MLXSW_AFA_TRAPDISC_SIZE 1

enum mlxsw_afa_trapdisc_trap_action {
	MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP = 0,
	MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP = 2,
};

/* afa_trapdisc_trap_action
 * Trap Action.
 */
MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);

enum mlxsw_afa_trapdisc_forward_action {
	MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD = 1,
	MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
};

/* afa_trapdisc_forward_action
 * Forward Action.
 */
MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);

/* afa_trapdisc_trap_id
 * Trap ID to configure.
 */
MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);

/* afa_trapdisc_mirror_agent
 * Mirror agent.
 */
MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);

/* afa_trapdisc_mirror_enable
 * Mirror enable.
 */
MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);
772
/* Encode the trap/forward portion of a Trap/Discard action payload. */
static inline void
mlxsw_afa_trapdisc_pack(char *payload,
			enum mlxsw_afa_trapdisc_trap_action trap_action,
			enum mlxsw_afa_trapdisc_forward_action forward_action,
			u16 trap_id)
{
	mlxsw_afa_trapdisc_trap_action_set(payload, trap_action);
	mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
	mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}

/* Encode the SPAN mirroring portion of a Trap/Discard action payload. */
static inline void
mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
			       u8 mirror_agent)
{
	mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
	mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
}
791
792 int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
793 {
794 char *act = mlxsw_afa_block_append_action(block,
795 MLXSW_AFA_TRAPDISC_CODE,
796 MLXSW_AFA_TRAPDISC_SIZE);
797
798 if (!act)
799 return -ENOBUFS;
800 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
801 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
802 return 0;
803 }
804 EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
805
806 int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
807 {
808 char *act = mlxsw_afa_block_append_action(block,
809 MLXSW_AFA_TRAPDISC_CODE,
810 MLXSW_AFA_TRAPDISC_SIZE);
811
812 if (!act)
813 return -ENOBUFS;
814 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
815 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
816 trap_id);
817 return 0;
818 }
819 EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
820
821 int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
822 u16 trap_id)
823 {
824 char *act = mlxsw_afa_block_append_action(block,
825 MLXSW_AFA_TRAPDISC_CODE,
826 MLXSW_AFA_TRAPDISC_SIZE);
827
828 if (!act)
829 return -ENOBUFS;
830 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
831 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
832 trap_id);
833 return 0;
834 }
835 EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
836
/* Block-owned SPAN mirror agent allocation. */
struct mlxsw_afa_mirror {
	struct mlxsw_afa_resource resource;	/* must be first for container_of */
	int span_id;		/* mirror agent id returned by the driver */
	u8 local_in_port;
	u8 local_out_port;
	bool ingress;		/* mirror on ingress (vs egress) of in_port */
};
844
/* Tear down the driver mirror session and free the wrapper.
 * NOTE(review): does not unlink resource.list - callers invoking this
 * outside of mlxsw_afa_resources_destroy() must unlink it themselves.
 */
static void
mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
			 struct mlxsw_afa_mirror *mirror)
{
	block->afa->ops->mirror_del(block->afa->ops_priv,
				    mirror->local_in_port,
				    mirror->local_out_port,
				    mirror->ingress);
	kfree(mirror);
}
855
856 static void
857 mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
858 struct mlxsw_afa_resource *resource)
859 {
860 struct mlxsw_afa_mirror *mirror;
861
862 mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
863 mlxsw_afa_mirror_destroy(block, mirror);
864 }
865
/* Set up a driver mirror session between the two ports and register it
 * as a block resource. Returns ERR_PTR() on failure.
 */
static struct mlxsw_afa_mirror *
mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
			u8 local_in_port, u8 local_out_port,
			bool ingress)
{
	struct mlxsw_afa_mirror *mirror;
	int err;

	mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
	if (!mirror)
		return ERR_PTR(-ENOMEM);

	err = block->afa->ops->mirror_add(block->afa->ops_priv,
					  local_in_port, local_out_port,
					  ingress, &mirror->span_id);
	if (err)
		goto err_mirror_add;

	mirror->ingress = ingress;
	mirror->local_out_port = local_out_port;
	mirror->local_in_port = local_in_port;
	mirror->resource.destructor = mlxsw_afa_mirror_destructor;
	mlxsw_afa_resource_add(block, &mirror->resource);
	return mirror;

err_mirror_add:
	kfree(mirror);
	return ERR_PTR(err);
}
895
896 static int
897 mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
898 u8 mirror_agent)
899 {
900 char *act = mlxsw_afa_block_append_action(block,
901 MLXSW_AFA_TRAPDISC_CODE,
902 MLXSW_AFA_TRAPDISC_SIZE);
903 if (!act)
904 return -ENOBUFS;
905 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
906 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
907 mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
908 return 0;
909 }
910
911 int
912 mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
913 u8 local_in_port, u8 local_out_port, bool ingress)
914 {
915 struct mlxsw_afa_mirror *mirror;
916 int err;
917
918 mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
919 ingress);
920 if (IS_ERR(mirror))
921 return PTR_ERR(mirror);
922
923 err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
924 if (err)
925 goto err_append_allocated_mirror;
926
927 return 0;
928
929 err_append_allocated_mirror:
930 mlxsw_afa_mirror_destroy(block, mirror);
931 return err;
932 }
933 EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
934
/* Forwarding Action
 * -----------------
 * Forwarding Action can be used to implement Policy Based Switching (PBS)
 * as well as OpenFlow related "Output" action.
 */

#define MLXSW_AFA_FORWARD_CODE 0x07
#define MLXSW_AFA_FORWARD_SIZE 1

enum mlxsw_afa_forward_type {
	/* PBS, Policy Based Switching */
	MLXSW_AFA_FORWARD_TYPE_PBS,
	/* Output, OpenFlow output type */
	MLXSW_AFA_FORWARD_TYPE_OUTPUT,
};

/* afa_forward_type */
MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);

/* afa_forward_pbs_ptr
 * A pointer to the PBS entry configured by PPBS register.
 * Reserved when in_port is set.
 */
MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);

/* afa_forward_in_port
 * Packet is forwarded back to the ingress port.
 */
MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);

/* Encode a Forwarding action payload. */
static inline void
mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
		       u32 pbs_ptr, bool in_port)
{
	mlxsw_afa_forward_type_set(payload, type);
	mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
	mlxsw_afa_forward_in_port_set(payload, in_port);
}
973
974 int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
975 u8 local_port, bool in_port)
976 {
977 struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
978 u32 kvdl_index;
979 char *act;
980 int err;
981
982 if (in_port)
983 return -EOPNOTSUPP;
984 fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
985 if (IS_ERR(fwd_entry_ref))
986 return PTR_ERR(fwd_entry_ref);
987 kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;
988
989 act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
990 MLXSW_AFA_FORWARD_SIZE);
991 if (!act) {
992 err = -ENOBUFS;
993 goto err_append_action;
994 }
995 mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
996 kvdl_index, in_port);
997 return 0;
998
999 err_append_action:
1000 mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
1001 return err;
1002 }
1003 EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
1004
/* Policing and Counting Action
 * ----------------------------
 * Policing and Counting action is used for binding policer and counter
 * to ACL rules.
 */

#define MLXSW_AFA_POLCNT_CODE 0x08
#define MLXSW_AFA_POLCNT_SIZE 1

enum mlxsw_afa_polcnt_counter_set_type {
	/* No count */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
	/* Count packets and bytes */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
	/* Count only packets */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
};

/* afa_polcnt_counter_set_type
 * Counter set type for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);

/* afa_polcnt_counter_index
 * Counter index for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);

/* Encode a Policing/Counting action payload. */
static inline void
mlxsw_afa_polcnt_pack(char *payload,
		      enum mlxsw_afa_polcnt_counter_set_type set_type,
		      u32 counter_index)
{
	mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
	mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}
1041
1042 int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
1043 u32 counter_index)
1044 {
1045 char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
1046 MLXSW_AFA_POLCNT_SIZE);
1047 if (!act)
1048 return -ENOBUFS;
1049 mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
1050 counter_index);
1051 return 0;
1052 }
1053 EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);
1054
1055 int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
1056 u32 *p_counter_index)
1057 {
1058 struct mlxsw_afa_counter *counter;
1059 u32 counter_index;
1060 int err;
1061
1062 counter = mlxsw_afa_counter_create(block);
1063 if (IS_ERR(counter))
1064 return PTR_ERR(counter);
1065 counter_index = counter->counter_index;
1066
1067 err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
1068 if (err)
1069 goto err_append_allocated_counter;
1070
1071 if (p_counter_index)
1072 *p_counter_index = counter_index;
1073 return 0;
1074
1075 err_append_allocated_counter:
1076 mlxsw_afa_counter_destroy(block, counter);
1077 return err;
1078 }
1079 EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
1080
1081 /* Virtual Router and Forwarding Domain Action
1082 * -------------------------------------------
 * Virtual Switch action is used to manipulate the Virtual Router (VR),
1084 * MPLS label space and the Forwarding Identifier (FID).
1085 */
1086
/* Action code and size for the Virtual Router and Forwarding Domain
 * action, as passed to mlxsw_afa_block_append_action().
 */
#define MLXSW_AFA_VIRFWD_CODE 0x0E
#define MLXSW_AFA_VIRFWD_SIZE 1

/* Command applied to the packet's Forwarding Identifier (FID). */
enum mlxsw_afa_virfwd_fid_cmd {
	/* Do nothing */
	MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
	/* Set the Forwarding Identifier (FID) to fid */
	MLXSW_AFA_VIRFWD_FID_CMD_SET,
};

/* afa_virfwd_fid_cmd
 * One of enum mlxsw_afa_virfwd_fid_cmd.
 */
MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);

/* afa_virfwd_fid
 * The FID value.
 */
MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);
1104
1105 static inline void mlxsw_afa_virfwd_pack(char *payload,
1106 enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
1107 u16 fid)
1108 {
1109 mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
1110 mlxsw_afa_virfwd_fid_set(payload, fid);
1111 }
1112
1113 int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
1114 {
1115 char *act = mlxsw_afa_block_append_action(block,
1116 MLXSW_AFA_VIRFWD_CODE,
1117 MLXSW_AFA_VIRFWD_SIZE);
1118 if (!act)
1119 return -ENOBUFS;
1120 mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
1121 return 0;
1122 }
1123 EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
1124
1125 /* MC Routing Action
1126 * -----------------
1127 * The Multicast router action. Can be used by RMFT_V2 - Router Multicast
1128 * Forwarding Table Version 2 Register.
1129 */
1130
/* Action code and size for the MC Routing action, as passed to
 * mlxsw_afa_block_append_action().
 */
#define MLXSW_AFA_MCROUTER_CODE 0x10
#define MLXSW_AFA_MCROUTER_SIZE 2

/* Reverse Path Forwarding (RPF) action. */
enum mlxsw_afa_mcrouter_rpf_action {
	MLXSW_AFA_MCROUTER_RPF_ACTION_NOP,
	MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
	MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR,
};

/* afa_mcrouter_rpf_action */
MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3);

/* afa_mcrouter_expected_irif */
MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16);

/* afa_mcrouter_min_mtu */
MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16);

/* Whether rigr_rmid_index carries a valid RMID.
 * NOTE(review): enum tag is spelled "mrouter" (sic), unlike the
 * surrounding "mcrouter" identifiers.
 */
enum mlxsw_afa_mrouter_vrmid {
	MLXSW_AFA_MCROUTER_VRMID_INVALID,
	MLXSW_AFA_MCROUTER_VRMID_VALID
};

/* afa_mcrouter_vrmid
 * Valid RMID: rigr_rmid_index is used as RMID
 */
MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1);

/* afa_mcrouter_rigr_rmid_index
 * When the vrmid field is set to invalid, the field is used as a pointer
 * to the Router Interface Group (RIGR) Table in the KVD linear.
 * When the vrmid is set to valid, the field is used as an RMID index,
 * ranging from 0 to max_mid - 1. The index is to the Port Group Table.
 */
MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24);
1166
1167 static inline void
1168 mlxsw_afa_mcrouter_pack(char *payload,
1169 enum mlxsw_afa_mcrouter_rpf_action rpf_action,
1170 u16 expected_irif, u16 min_mtu,
1171 enum mlxsw_afa_mrouter_vrmid vrmid, u32 rigr_rmid_index)
1172
1173 {
1174 mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action);
1175 mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif);
1176 mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu);
1177 mlxsw_afa_mcrouter_vrmid_set(payload, vrmid);
1178 mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index);
1179 }
1180
1181 int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
1182 u16 expected_irif, u16 min_mtu,
1183 bool rmid_valid, u32 kvdl_index)
1184 {
1185 char *act = mlxsw_afa_block_append_action(block,
1186 MLXSW_AFA_MCROUTER_CODE,
1187 MLXSW_AFA_MCROUTER_SIZE);
1188 if (!act)
1189 return -ENOBUFS;
1190 mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
1191 expected_irif, min_mtu, rmid_valid, kvdl_index);
1192 return 0;
1193 }
1194 EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);