/*
 * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/rhashtable.h>
#include <linux/list.h>

#include "item.h"
#include "trap.h"
#include "core_acl_flex_actions.h"

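/* Each MLXSW_ITEM32() below generates mlxsw_afa_*_get()/_set() accessors for
 * the named field, located at the given byte offset, bit shift and bit width
 * within an encoded action buffer (see item.h).
 */
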
enum mlxsw_afa_set_type {
        MLXSW_AFA_SET_TYPE_NEXT,
        MLXSW_AFA_SET_TYPE_GOTO,
};

/* afa_set_type
 * Type of the record at the end of the action set.
 */
MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);

/* afa_set_next_action_set_ptr
 * A pointer to the next action set in the KVD Centralized database.
 */
MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);

/* afa_set_goto_g
 * group - When set, the binding is of an ACL group. When cleared,
 * the binding is of an ACL.
 * Must be set to 1 for Spectrum.
 */
MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);

enum mlxsw_afa_set_goto_binding_cmd {
        /* continue to the next binding point */
        MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
        /* jump to the next binding point, no return */
        MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
        /* terminate the ACL binding */
        MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
};

/* afa_set_goto_binding_cmd */
MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);

/* afa_set_goto_next_binding
 * ACL/ACL group identifier. If the g bit is set, this field should hold
 * the acl_group_id, else it should hold the acl_id.
 */
MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);

/* afa_all_action_type
 * Action Type.
 */
MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);

struct mlxsw_afa {
        unsigned int max_acts_per_set;
        const struct mlxsw_afa_ops *ops;
        void *ops_priv;
        struct rhashtable set_ht;
        struct rhashtable fwd_entry_ht;
};

#define MLXSW_AFA_SET_LEN 0xA8

struct mlxsw_afa_set_ht_key {
        char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
        bool is_first;
};

/* The set structure holds one action set record. It contains up to three
 * actions (depending on the size of the particular actions). The set is
 * either put directly into a rule, or stored in the KVD linear area.
 * To prevent duplicate entries in the KVD linear area, a hashtable is
 * used to track sets that were previously inserted and may be shared.
 */

struct mlxsw_afa_set {
        struct rhash_head ht_node;
        struct mlxsw_afa_set_ht_key ht_key;
        u32 kvdl_index;
        bool shared; /* Inserted in hashtable (doesn't mean that
                      * kvdl_index is valid).
                      */
        unsigned int ref_count;
        struct mlxsw_afa_set *next; /* Pointer to the next set. */
        struct mlxsw_afa_set *prev; /* Pointer to the previous set;
                                     * note that a set may have multiple
                                     * sets from multiple blocks
                                     * pointing at it. This is only
                                     * usable until commit.
                                     */
};

static const struct rhashtable_params mlxsw_afa_set_ht_params = {
        .key_len = sizeof(struct mlxsw_afa_set_ht_key),
        .key_offset = offsetof(struct mlxsw_afa_set, ht_key),
        .head_offset = offsetof(struct mlxsw_afa_set, ht_node),
        .automatic_shrinking = true,
};

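/* A forwarding (PBS) record in the KVD linear area is created once per
 * destination local port and shared between blocks via fwd_entry_ht,
 * using a reference count.
 */
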
struct mlxsw_afa_fwd_entry_ht_key {
        u8 local_port;
};

struct mlxsw_afa_fwd_entry {
        struct rhash_head ht_node;
        struct mlxsw_afa_fwd_entry_ht_key ht_key;
        u32 kvdl_index;
        unsigned int ref_count;
};

static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
        .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
        .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
        .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
        .automatic_shrinking = true,
};

struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
                                   const struct mlxsw_afa_ops *ops,
                                   void *ops_priv)
{
        struct mlxsw_afa *mlxsw_afa;
        int err;

        mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
        if (!mlxsw_afa)
                return ERR_PTR(-ENOMEM);
        err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
        if (err)
                goto err_set_rhashtable_init;
        err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
                              &mlxsw_afa_fwd_entry_ht_params);
        if (err)
                goto err_fwd_entry_rhashtable_init;
        mlxsw_afa->max_acts_per_set = max_acts_per_set;
        mlxsw_afa->ops = ops;
        mlxsw_afa->ops_priv = ops_priv;
        return mlxsw_afa;

err_fwd_entry_rhashtable_init:
        rhashtable_destroy(&mlxsw_afa->set_ht);
err_set_rhashtable_init:
        kfree(mlxsw_afa);
        return ERR_PTR(err);
}
EXPORT_SYMBOL(mlxsw_afa_create);

void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
{
        rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
        rhashtable_destroy(&mlxsw_afa->set_ht);
        kfree(mlxsw_afa);
}
EXPORT_SYMBOL(mlxsw_afa_destroy);

static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
                                   enum mlxsw_afa_set_goto_binding_cmd cmd,
                                   u16 group_id)
{
        char *actions = set->ht_key.enc_actions;

        mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
        mlxsw_afa_set_goto_g_set(actions, true);
        mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
        mlxsw_afa_set_goto_next_binding_set(actions, group_id);
}

static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
                                   u32 next_set_kvdl_index)
{
        char *actions = set->ht_key.enc_actions;

        mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
        mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
}

static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
{
        struct mlxsw_afa_set *set;

        set = kzalloc(sizeof(*set), GFP_KERNEL);
        if (!set)
                return NULL;
        /* Need to initialize the set to pass by default */
        mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
        set->ht_key.is_first = is_first;
        set->ref_count = 1;
        return set;
}

static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
{
        kfree(set);
}

static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
                               struct mlxsw_afa_set *set)
{
        int err;

        err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
                                     mlxsw_afa_set_ht_params);
        if (err)
                return err;
        err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
                                           &set->kvdl_index,
                                           set->ht_key.enc_actions,
                                           set->ht_key.is_first);
        if (err)
                goto err_kvdl_set_add;
        set->shared = true;
        set->prev = NULL;
        return 0;

err_kvdl_set_add:
        rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
                               mlxsw_afa_set_ht_params);
        return err;
}

static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
                                  struct mlxsw_afa_set *set)
{
        mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
                                     set->kvdl_index,
                                     set->ht_key.is_first);
        rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
                               mlxsw_afa_set_ht_params);
        set->shared = false;
}

static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
                              struct mlxsw_afa_set *set)
{
        if (--set->ref_count)
                return;
        if (set->shared)
                mlxsw_afa_set_unshare(mlxsw_afa, set);
        mlxsw_afa_set_destroy(set);
}

static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
                                               struct mlxsw_afa_set *orig_set)
{
        struct mlxsw_afa_set *set;
        int err;

        /* There is a hashtable of sets maintained. If a set with the exact
         * same encoding exists, we reuse it. Otherwise, the current set
         * is shared by making it available to others using the hash table.
         */
        set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
                                     mlxsw_afa_set_ht_params);
        if (set) {
                set->ref_count++;
                mlxsw_afa_set_put(mlxsw_afa, orig_set);
        } else {
                set = orig_set;
                err = mlxsw_afa_set_share(mlxsw_afa, set);
                if (err)
                        return ERR_PTR(err);
        }
        return set;
}

/* Block structure holds a list of action sets. One action block
 * represents one chain of actions executed upon match of a rule.
 */

struct mlxsw_afa_block {
        struct mlxsw_afa *afa;
        bool finished;
        struct mlxsw_afa_set *first_set;
        struct mlxsw_afa_set *cur_set;
        unsigned int cur_act_index; /* In current set. */
        struct list_head resource_list; /* List of resources held by actions
                                         * in this block.
                                         */
};

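/* A resource is anything an action in the block holds on the side (e.g. a
 * KVD forwarding entry, a flow counter or a SPAN agent). Resources are
 * released via their destructor when the block is destroyed.
 */
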
struct mlxsw_afa_resource {
        struct list_head list;
        void (*destructor)(struct mlxsw_afa_block *block,
                           struct mlxsw_afa_resource *resource);
};

static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
                                   struct mlxsw_afa_resource *resource)
{
        list_add(&resource->list, &block->resource_list);
}

static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
{
        struct mlxsw_afa_resource *resource, *tmp;

        list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
                list_del(&resource->list);
                resource->destructor(block, resource);
        }
}

struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
{
        struct mlxsw_afa_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block)
                return NULL;
        INIT_LIST_HEAD(&block->resource_list);
        block->afa = mlxsw_afa;

        /* At least one action set is always present, so just create it here */
        block->first_set = mlxsw_afa_set_create(true);
        if (!block->first_set)
                goto err_first_set_create;
        block->cur_set = block->first_set;
        return block;

err_first_set_create:
        kfree(block);
        return NULL;
}
EXPORT_SYMBOL(mlxsw_afa_block_create);

void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
{
        struct mlxsw_afa_set *set = block->first_set;
        struct mlxsw_afa_set *next_set;

        do {
                next_set = set->next;
                mlxsw_afa_set_put(block->afa, set);
                set = next_set;
        } while (set);
        mlxsw_afa_resources_destroy(block);
        kfree(block);
}
EXPORT_SYMBOL(mlxsw_afa_block_destroy);

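/* Once a block is committed, its first set is handed to the device either
 * inline, via mlxsw_afa_block_first_set(), or by its KVD linear index, via
 * mlxsw_afa_block_first_set_kvdl_index(); the remaining sets are reached
 * through "next" records pointing into the KVD linear area.
 */
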
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
{
        struct mlxsw_afa_set *set = block->cur_set;
        struct mlxsw_afa_set *prev_set;

        block->cur_set = NULL;
        block->finished = true;

        /* Go over all linked sets starting from last
         * and try to find existing set in the hash table.
         * In case it is not there, assign a KVD linear index
         * and insert it.
         */
        do {
                prev_set = set->prev;
                set = mlxsw_afa_set_get(block->afa, set);
                if (IS_ERR(set))
                        /* No rollback is needed since the chain is
                         * in consistent state and mlxsw_afa_block_destroy
                         * will take care of putting it away.
                         */
                        return PTR_ERR(set);
                if (prev_set) {
                        prev_set->next = set;
                        mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
                        set = prev_set;
                }
        } while (prev_set);

        block->first_set = set;
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_commit);

char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
{
        return block->first_set->ht_key.enc_actions;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);

u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
{
        return block->first_set->kvdl_index;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);

int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
        if (block->finished)
                return -EINVAL;
        mlxsw_afa_set_goto_set(block->cur_set,
                               MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
        block->finished = true;
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_continue);

int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
{
        if (block->finished)
                return -EINVAL;
        mlxsw_afa_set_goto_set(block->cur_set,
                               MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
        block->finished = true;
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);

int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
{
        if (block->finished)
                return -EINVAL;
        mlxsw_afa_set_goto_set(block->cur_set,
                               MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
        block->finished = true;
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_terminate);

static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
        struct mlxsw_afa_fwd_entry *fwd_entry;
        int err;

        fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
        if (!fwd_entry)
                return ERR_PTR(-ENOMEM);
        fwd_entry->ht_key.local_port = local_port;
        fwd_entry->ref_count = 1;

        err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
                                     &fwd_entry->ht_node,
                                     mlxsw_afa_fwd_entry_ht_params);
        if (err)
                goto err_rhashtable_insert;

        err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
                                                 &fwd_entry->kvdl_index,
                                                 local_port);
        if (err)
                goto err_kvdl_fwd_entry_add;
        return fwd_entry;

err_kvdl_fwd_entry_add:
        rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
                               mlxsw_afa_fwd_entry_ht_params);
err_rhashtable_insert:
        kfree(fwd_entry);
        return ERR_PTR(err);
}

static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
                                        struct mlxsw_afa_fwd_entry *fwd_entry)
{
        mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
                                           fwd_entry->kvdl_index);
        rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
                               mlxsw_afa_fwd_entry_ht_params);
        kfree(fwd_entry);
}

static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
        struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
        struct mlxsw_afa_fwd_entry *fwd_entry;

        ht_key.local_port = local_port;
        fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
                                           mlxsw_afa_fwd_entry_ht_params);
        if (fwd_entry) {
                fwd_entry->ref_count++;
                return fwd_entry;
        }
        return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
}

static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
                                    struct mlxsw_afa_fwd_entry *fwd_entry)
{
        if (--fwd_entry->ref_count)
                return;
        mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}

struct mlxsw_afa_fwd_entry_ref {
        struct mlxsw_afa_resource resource;
        struct mlxsw_afa_fwd_entry *fwd_entry;
};

static void
mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
                                struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
{
        mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
        kfree(fwd_entry_ref);
}

static void
mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
                                   struct mlxsw_afa_resource *resource)
{
        struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;

        fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
                                     resource);
        mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
}

static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
        struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
        struct mlxsw_afa_fwd_entry *fwd_entry;
        int err;

        fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
        if (!fwd_entry_ref)
                return ERR_PTR(-ENOMEM);
        fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
        if (IS_ERR(fwd_entry)) {
                err = PTR_ERR(fwd_entry);
                goto err_fwd_entry_get;
        }
        fwd_entry_ref->fwd_entry = fwd_entry;
        fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
        mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
        return fwd_entry_ref;

err_fwd_entry_get:
        kfree(fwd_entry_ref);
        return ERR_PTR(err);
}

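/* A counter resource holds a flow counter index obtained through
 * ops->counter_index_get() and returns it when the block is destroyed.
 */
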
struct mlxsw_afa_counter {
        struct mlxsw_afa_resource resource;
        u32 counter_index;
};

static void
mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
                          struct mlxsw_afa_counter *counter)
{
        block->afa->ops->counter_index_put(block->afa->ops_priv,
                                           counter->counter_index);
        kfree(counter);
}

static void
mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
                             struct mlxsw_afa_resource *resource)
{
        struct mlxsw_afa_counter *counter;

        counter = container_of(resource, struct mlxsw_afa_counter, resource);
        mlxsw_afa_counter_destroy(block, counter);
}

static struct mlxsw_afa_counter *
mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
{
        struct mlxsw_afa_counter *counter;
        int err;

        counter = kzalloc(sizeof(*counter), GFP_KERNEL);
        if (!counter)
                return ERR_PTR(-ENOMEM);

        err = block->afa->ops->counter_index_get(block->afa->ops_priv,
                                                 &counter->counter_index);
        if (err)
                goto err_counter_index_get;
        counter->resource.destructor = mlxsw_afa_counter_destructor;
        mlxsw_afa_resource_add(block, &counter->resource);
        return counter;

err_counter_index_get:
        kfree(counter);
        return ERR_PTR(err);
}

#define MLXSW_AFA_ONE_ACTION_LEN 32
#define MLXSW_AFA_PAYLOAD_OFFSET 4

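/* Reserve room for an action of the given code and size (in action records)
 * in the block, opening a new action set if the current one is full.
 * Returns a pointer to the action payload, or NULL on failure.
 */
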
static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
                                           u8 action_code, u8 action_size)
{
        char *oneact;
        char *actions;

        if (WARN_ON(block->finished))
                return NULL;
        if (block->cur_act_index + action_size >
            block->afa->max_acts_per_set) {
                struct mlxsw_afa_set *set;

                /* The appended action won't fit into the current action set,
                 * so create a new set.
                 */
                set = mlxsw_afa_set_create(false);
                if (!set)
                        return NULL;
                set->prev = block->cur_set;
                block->cur_act_index = 0;
                block->cur_set->next = set;
                block->cur_set = set;
        }

        actions = block->cur_set->ht_key.enc_actions;
        oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
        block->cur_act_index += action_size;
        mlxsw_afa_all_action_type_set(oneact, action_code);
        return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
}

/* VLAN Action
 * -----------
 * The VLAN action is used for manipulating VLANs. It can be used to implement
 * QinQ, VLAN translation, change of the PCP bits of the VLAN tag, push, pop
 * and swap of VLANs, and more.
 */

#define MLXSW_AFA_VLAN_CODE 0x02
#define MLXSW_AFA_VLAN_SIZE 1

enum mlxsw_afa_vlan_vlan_tag_cmd {
        MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
        MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
        MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
};

enum mlxsw_afa_vlan_cmd {
        MLXSW_AFA_VLAN_CMD_NOP,
        MLXSW_AFA_VLAN_CMD_SET_OUTER,
        MLXSW_AFA_VLAN_CMD_SET_INNER,
        MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
        MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
        MLXSW_AFA_VLAN_CMD_SWAP,
};

/* afa_vlan_vlan_tag_cmd
 * Tag command: push, pop, nop VLAN header.
 */
MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);

/* afa_vlan_vid_cmd */
MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);

/* afa_vlan_vid */
MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);

/* afa_vlan_ethertype_cmd */
MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);

/* afa_vlan_ethertype
 * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
 */
MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);

/* afa_vlan_pcp_cmd */
MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);

/* afa_vlan_pcp */
MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);

static inline void
mlxsw_afa_vlan_pack(char *payload,
                    enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
                    enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
                    enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
                    enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
{
        mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
        mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
        mlxsw_afa_vlan_vid_set(payload, vid);
        mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
        mlxsw_afa_vlan_pcp_set(payload, pcp);
        mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
        mlxsw_afa_vlan_ethertype_set(payload, ethertype);
}

int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
                                       u16 vid, u8 pcp, u8 et)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_VLAN_CODE,
                                                  MLXSW_AFA_VLAN_SIZE);

        if (!act)
                return -ENOBUFS;
        mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);

/* Trap / Discard Action
 * ---------------------
 * The Trap / Discard action enables trapping / mirroring packets to the CPU
 * as well as discarding packets.
 * The ACL Trap / Discard separates the forward/discard control from CPU
 * trap control. In addition, the Trap / Discard action enables activating
 * SPAN (port mirroring).
 */

#define MLXSW_AFA_TRAPDISC_CODE 0x03
#define MLXSW_AFA_TRAPDISC_SIZE 1

enum mlxsw_afa_trapdisc_trap_action {
        MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP = 0,
        MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP = 2,
};

/* afa_trapdisc_trap_action
 * Trap Action.
 */
MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);

enum mlxsw_afa_trapdisc_forward_action {
        MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD = 1,
        MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
};

/* afa_trapdisc_forward_action
 * Forward Action.
 */
MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);

/* afa_trapdisc_trap_id
 * Trap ID to configure.
 */
MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);

/* afa_trapdisc_mirror_agent
 * Mirror agent.
 */
MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);

/* afa_trapdisc_mirror_enable
 * Mirror enable.
 */
MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);

static inline void
mlxsw_afa_trapdisc_pack(char *payload,
                        enum mlxsw_afa_trapdisc_trap_action trap_action,
                        enum mlxsw_afa_trapdisc_forward_action forward_action,
                        u16 trap_id)
{
        mlxsw_afa_trapdisc_trap_action_set(payload, trap_action);
        mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
        mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
}

static inline void
mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
                               u8 mirror_agent)
{
        mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
        mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
}

int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);

        if (!act)
                return -ENOBUFS;
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);

int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);

        if (!act)
                return -ENOBUFS;
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
                                trap_id);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap);

int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
                                            u16 trap_id)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);

        if (!act)
                return -ENOBUFS;
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
                                trap_id);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);

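/* A mirror resource holds a SPAN agent allocated through ops->mirror_add();
 * the agent is then referenced by the mirror fields of a trap/discard
 * action and released when the block is destroyed.
 */
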
struct mlxsw_afa_mirror {
        struct mlxsw_afa_resource resource;
        int span_id;
        u8 local_in_port;
        u8 local_out_port;
        bool ingress;
};

static void
mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
                         struct mlxsw_afa_mirror *mirror)
{
        block->afa->ops->mirror_del(block->afa->ops_priv,
                                    mirror->local_in_port,
                                    mirror->local_out_port,
                                    mirror->ingress);
        kfree(mirror);
}

static void
mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
                            struct mlxsw_afa_resource *resource)
{
        struct mlxsw_afa_mirror *mirror;

        mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
        mlxsw_afa_mirror_destroy(block, mirror);
}

static struct mlxsw_afa_mirror *
mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
                        u8 local_in_port, u8 local_out_port,
                        bool ingress)
{
        struct mlxsw_afa_mirror *mirror;
        int err;

        mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
        if (!mirror)
                return ERR_PTR(-ENOMEM);

        err = block->afa->ops->mirror_add(block->afa->ops_priv,
                                          local_in_port, local_out_port,
                                          ingress, &mirror->span_id);
        if (err)
                goto err_mirror_add;

        mirror->ingress = ingress;
        mirror->local_out_port = local_out_port;
        mirror->local_in_port = local_in_port;
        mirror->resource.destructor = mlxsw_afa_mirror_destructor;
        mlxsw_afa_resource_add(block, &mirror->resource);
        return mirror;

err_mirror_add:
        kfree(mirror);
        return ERR_PTR(err);
}

static int
mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
                                        u8 mirror_agent)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);
        if (!act)
                return -ENOBUFS;
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
                                MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
        mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
        return 0;
}

int
mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
                              u8 local_in_port, u8 local_out_port, bool ingress)
{
        struct mlxsw_afa_mirror *mirror;
        int err;

        mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
                                         ingress);
        if (IS_ERR(mirror))
                return PTR_ERR(mirror);

        err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
        if (err)
                goto err_append_allocated_mirror;

        return 0;

err_append_allocated_mirror:
        mlxsw_afa_mirror_destroy(block, mirror);
        return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);

/* Forwarding Action
 * -----------------
 * The Forwarding action can be used to implement Policy Based Switching (PBS)
 * as well as the OpenFlow "Output" action.
 */

#define MLXSW_AFA_FORWARD_CODE 0x07
#define MLXSW_AFA_FORWARD_SIZE 1

enum mlxsw_afa_forward_type {
        /* PBS, Policy Based Switching */
        MLXSW_AFA_FORWARD_TYPE_PBS,
        /* Output, OpenFlow output type */
        MLXSW_AFA_FORWARD_TYPE_OUTPUT,
};

/* afa_forward_type */
MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);

/* afa_forward_pbs_ptr
 * A pointer to the PBS entry configured by the PPBS register.
 * Reserved when in_port is set.
 */
MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);

/* afa_forward_in_port
 * Packet is forwarded back to the ingress port.
 */
MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);

static inline void
mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
                       u32 pbs_ptr, bool in_port)
{
        mlxsw_afa_forward_type_set(payload, type);
        mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
        mlxsw_afa_forward_in_port_set(payload, in_port);
}

int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
                               u8 local_port, bool in_port)
{
        struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
        u32 kvdl_index;
        char *act;
        int err;

        if (in_port)
                return -EOPNOTSUPP;
        fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
        if (IS_ERR(fwd_entry_ref))
                return PTR_ERR(fwd_entry_ref);
        kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;

        act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
                                            MLXSW_AFA_FORWARD_SIZE);
        if (!act) {
                err = -ENOBUFS;
                goto err_append_action;
        }
        mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
                               kvdl_index, in_port);
        return 0;

err_append_action:
        mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
        return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);

/* Policing and Counting Action
 * ----------------------------
 * The Policing and Counting action is used to bind a policer and a counter
 * to ACL rules.
 */

#define MLXSW_AFA_POLCNT_CODE 0x08
#define MLXSW_AFA_POLCNT_SIZE 1

enum mlxsw_afa_polcnt_counter_set_type {
        /* No count */
        MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
        /* Count packets and bytes */
        MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
        /* Count only packets */
        MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
};

/* afa_polcnt_counter_set_type
 * Counter set type for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);

/* afa_polcnt_counter_index
 * Counter index for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);

static inline void
mlxsw_afa_polcnt_pack(char *payload,
                      enum mlxsw_afa_polcnt_counter_set_type set_type,
                      u32 counter_index)
{
        mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
        mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}

int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
                                             u32 counter_index)
{
        char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
                                                  MLXSW_AFA_POLCNT_SIZE);
        if (!act)
                return -ENOBUFS;
        mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
                              counter_index);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);

int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
                                   u32 *p_counter_index)
{
        struct mlxsw_afa_counter *counter;
        u32 counter_index;
        int err;

        counter = mlxsw_afa_counter_create(block);
        if (IS_ERR(counter))
                return PTR_ERR(counter);
        counter_index = counter->counter_index;

        err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
        if (err)
                goto err_append_allocated_counter;

        if (p_counter_index)
                *p_counter_index = counter_index;
        return 0;

err_append_allocated_counter:
        mlxsw_afa_counter_destroy(block, counter);
        return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);

/* Virtual Router and Forwarding Domain Action
 * -------------------------------------------
 * The Virtual Switch action is used to manipulate the Virtual Router (VR),
 * MPLS label space and the Forwarding Identifier (FID).
 */

#define MLXSW_AFA_VIRFWD_CODE 0x0E
#define MLXSW_AFA_VIRFWD_SIZE 1

enum mlxsw_afa_virfwd_fid_cmd {
        /* Do nothing */
        MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
        /* Set the Forwarding Identifier (FID) to fid */
        MLXSW_AFA_VIRFWD_FID_CMD_SET,
};

/* afa_virfwd_fid_cmd */
MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);

/* afa_virfwd_fid
 * The FID value.
 */
MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);

static inline void mlxsw_afa_virfwd_pack(char *payload,
                                         enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
                                         u16 fid)
{
        mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
        mlxsw_afa_virfwd_fid_set(payload, fid);
}

int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_VIRFWD_CODE,
                                                  MLXSW_AFA_VIRFWD_SIZE);
        if (!act)
                return -ENOBUFS;
        mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);

/* MC Routing Action
 * -----------------
 * The Multicast router action. Can be used by the RMFT_V2 (Router Multicast
 * Forwarding Table Version 2) register.
 */

#define MLXSW_AFA_MCROUTER_CODE 0x10
#define MLXSW_AFA_MCROUTER_SIZE 2

enum mlxsw_afa_mcrouter_rpf_action {
        MLXSW_AFA_MCROUTER_RPF_ACTION_NOP,
        MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
        MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR,
};

/* afa_mcrouter_rpf_action */
MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3);

/* afa_mcrouter_expected_irif */
MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16);

/* afa_mcrouter_min_mtu */
MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16);

enum mlxsw_afa_mrouter_vrmid {
        MLXSW_AFA_MCROUTER_VRMID_INVALID,
        MLXSW_AFA_MCROUTER_VRMID_VALID
};

/* afa_mcrouter_vrmid
 * Valid RMID: rigr_rmid_index is used as RMID.
 */
MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1);

/* afa_mcrouter_rigr_rmid_index
 * When the vrmid field is set to invalid, the field is used as a pointer to
 * the Router Interface Group (RIGR) table in the KVD linear area.
 * When the vrmid is set to valid, the field is used as an RMID index, ranging
 * from 0 to max_mid - 1. The index is to the Port Group Table.
 */
MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24);

static inline void
mlxsw_afa_mcrouter_pack(char *payload,
                        enum mlxsw_afa_mcrouter_rpf_action rpf_action,
                        u16 expected_irif, u16 min_mtu,
                        enum mlxsw_afa_mrouter_vrmid vrmid, u32 rigr_rmid_index)
{
        mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action);
        mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif);
        mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu);
        mlxsw_afa_mcrouter_vrmid_set(payload, vrmid);
        mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index);
}

int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
                                    u16 expected_irif, u16 min_mtu,
                                    bool rmid_valid, u32 kvdl_index)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_MCROUTER_CODE,
                                                  MLXSW_AFA_MCROUTER_SIZE);
        if (!act)
                return -ENOBUFS;
        mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
                                expected_irif, min_mtu, rmid_valid, kvdl_index);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);