/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

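/* A prefix usage is a bitmap with one bit per possible prefix length
 * (0 up to MLXSW_SP_PREFIX_COUNT - 1). A FIB holding, say, a /24 and a
 * /32 route has bits 24 and 32 set. The helpers below provide the set
 * operations (iteration, subset test, equality, copy, zero, set/clear)
 * used to match a FIB against an LPM tree.
 */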
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	memset(prefix_usage, 0, sizeof(*prefix_usage));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

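/* Each virtual router owns one FIB. Entries are keyed by route address
 * and prefix length, with the address buffer sized for an IPv6 address
 * so the same key layout serves any supported protocol.
 * prefix_ref_count[] counts entries per prefix length and prefix_usage
 * mirrors it as a bitmap, which is what LPM tree selection operates on.
 */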
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

struct mlxsw_sp_fib_entry {
	struct rhash_head ht_node;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

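/* Insert and remove keep prefix_ref_count and prefix_usage in sync:
 * the first entry with a given prefix length sets the matching bit and
 * removing the last such entry clears it again.
 */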
static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned char prefix_len = fib_entry->key.prefix_len;
	int err;

	err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
				     mlxsw_sp_fib_ht_params);
	if (err)
		return err;
	if (fib->prefix_ref_count[prefix_len]++ == 0)
		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
	return 0;
}

static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
				      struct mlxsw_sp_fib_entry *fib_entry)
{
	unsigned char prefix_len = fib_entry->key.prefix_len;

	if (--fib->prefix_ref_count[prefix_len] == 0)
		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
	rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
			  size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry)
		return NULL;
	memcpy(fib_entry->key.addr, addr, addr_len);
	fib_entry->key.prefix_len = prefix_len;
	return fib_entry;
}

static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
{
	kfree(fib_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			  size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key = {{ 0 } };

	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

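/* LPM trees are a scarce device resource: MLXSW_SP_LPM_TREE_COUNT of them
 * are managed here, with IDs starting at MLXSW_SP_LPM_TREE_MIN (tree 0 is
 * the default binding, see mlxsw_sp_vr_lpm_tree_unbind()). A tree records
 * which prefix lengths are in use and is shared, by reference count,
 * between virtual routers whose FIBs need the exact same set.
 */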
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		if (lpm_tree->ref_count == 0) {
			if (one_reserved)
				one_reserved = false;
			else
				return lpm_tree;
		}
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

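/* Program the tree layout through the RALST register: the longest used
 * prefix length is written as the root bin and every other used prefix
 * length (0 excluded) is linked to the previously written, shorter one.
 */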
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

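/* Trees are looked up by exact match on protocol and prefix usage and
 * only created when no existing tree fits. Every successful get is
 * paired with a put, which frees the tree in the device once the last
 * reference is dropped.
 */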
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		if (lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto, one_reserved);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}

static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}
}

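/* A virtual router (VR) backs one kernel routing table in the device:
 * it records the table ID (with the local table squashed into the main
 * one), owns a FIB and is bound, via the RALTB register, to the LPM tree
 * matching that FIB's prefix usage. At most MLXSW_SP_VIRTUAL_ROUTER_MAX
 * VRs exist.
 */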
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (!vr->used)
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vr *vr)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_vr *vr)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local table into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id,
					    enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

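/* Creating a VR allocates its FIB, takes an LPM tree for the single
 * prefix length known so far and binds the VR to it before marking the
 * VR used. The tree is requested with one_reserved set, so one unused
 * tree remains free; mlxsw_sp_vr_lpm_tree_check() below requests trees
 * without that reservation.
 */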
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      unsigned char prefix_len,
					      u32 tb_id,
					      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib = mlxsw_sp_fib_create();
	if (IS_ERR(vr->fib))
		return ERR_CAST(vr->fib);

	vr->proto = proto;
	vr->tb_id = tb_id;
	mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 proto, true);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_tree_get;
	}
	vr->lpm_tree = lpm_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
	if (err)
		goto err_tree_bind;

	vr->used = true;
	return vr;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
err_tree_get:
	mlxsw_sp_fib_destroy(vr->fib);

	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
	mlxsw_sp_fib_destroy(vr->fib);
	vr->used = false;
}

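/* Make sure the VR is bound to a tree covering the requested prefix
 * usage, replacing its current tree when necessary. If no suitable new
 * tree can be obtained but the requirement is a subset of the current
 * tree, the existing binding is kept.
 */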
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
				     &vr->lpm_tree->prefix_usage))
		return 0;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 vr->proto, false);
	if (IS_ERR(lpm_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might still be
		 * good for us if our requirement is a subset of the
		 * prefixes used in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &vr->lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(lpm_tree);
	}

	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
	vr->lpm_tree = lpm_tree;
	return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
}

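/* mlxsw_sp_vr_get() returns the VR serving the given table, creating it
 * on first use; for an existing VR it also makes sure the bound LPM tree
 * can accommodate the new prefix length. mlxsw_sp_vr_put() is the
 * counterpart that releases the VR once its FIB is empty.
 */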
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
					   unsigned char prefix_len,
					   u32 tb_id,
					   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_vr *vr;
	int err;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
	if (!vr) {
		vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
		if (IS_ERR(vr))
			return vr;
	} else {
		struct mlxsw_sp_prefix_usage req_prefix_usage;

		mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
					  &vr->fib->prefix_usage);
		mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
		/* Need to replace the LPM tree in case a new prefix length
		 * is required.
		 */
		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
						 &req_prefix_usage);
		if (err)
			return ERR_PTR(err);
	}
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	/* Destroy the virtual router entity in case the associated FIB is
	 * empty and allow it to be used for other tables in the future.
	 * Otherwise, check whether some prefix usage disappeared and change
	 * the tree if that is the case. Note that if a new, smaller tree
	 * cannot be allocated, the original one will be kept in use.
	 */
	if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
	else
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
					   &vr->fib->prefix_usage);
}

static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		vr->id = i;
	}
}

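/* Router init/fini enable and disable the device router through the
 * RGCR register; init additionally caps the number of router interfaces
 * at MLXSW_SP_RIF_MAX and then initializes the LPM tree and virtual
 * router arrays.
 */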
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;
	mlxsw_sp_lpm_init(mlxsw_sp);
	mlxsw_sp_vrs_init(mlxsw_sp);
	return 0;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	__mlxsw_sp_router_fini(mlxsw_sp);
}