2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/rhashtable.h>
40 #include <linux/bitops.h>
41 #include <linux/in6.h>
42 #include <linux/notifier.h>
43 #include <linux/inetdevice.h>
44 #include <net/netevent.h>
45 #include <net/neighbour.h>
47 #include <net/ip_fib.h>
54 struct list_head nexthop_list
;
55 struct list_head neigh_list
;
56 struct net_device
*dev
;
57 struct mlxsw_sp_fid
*f
;
58 unsigned char addr
[ETH_ALEN
];
63 static struct mlxsw_sp_rif
*
64 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp
*mlxsw_sp
,
65 const struct net_device
*dev
);
67 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
68 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
71 mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage
*prefix_usage1
,
72 struct mlxsw_sp_prefix_usage
*prefix_usage2
)
76 mlxsw_sp_prefix_usage_for_each(prefix
, prefix_usage1
) {
77 if (!test_bit(prefix
, prefix_usage2
->b
))
84 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage
*prefix_usage1
,
85 struct mlxsw_sp_prefix_usage
*prefix_usage2
)
87 return !memcmp(prefix_usage1
, prefix_usage2
, sizeof(*prefix_usage1
));
91 mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage
*prefix_usage
)
93 struct mlxsw_sp_prefix_usage prefix_usage_none
= {{ 0 } };
95 return mlxsw_sp_prefix_usage_eq(prefix_usage
, &prefix_usage_none
);
99 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage
*prefix_usage1
,
100 struct mlxsw_sp_prefix_usage
*prefix_usage2
)
102 memcpy(prefix_usage1
, prefix_usage2
, sizeof(*prefix_usage1
));
106 mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage
*prefix_usage
)
108 memset(prefix_usage
, 0, sizeof(*prefix_usage
));
112 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage
*prefix_usage
,
113 unsigned char prefix_len
)
115 set_bit(prefix_len
, prefix_usage
->b
);
119 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage
*prefix_usage
,
120 unsigned char prefix_len
)
122 clear_bit(prefix_len
, prefix_usage
->b
);
125 struct mlxsw_sp_fib_key
{
126 unsigned char addr
[sizeof(struct in6_addr
)];
127 unsigned char prefix_len
;
130 enum mlxsw_sp_fib_entry_type
{
131 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE
,
132 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
,
133 MLXSW_SP_FIB_ENTRY_TYPE_TRAP
,
136 struct mlxsw_sp_nexthop_group
;
138 struct mlxsw_sp_fib_node
{
139 struct list_head entry_list
;
140 struct list_head list
;
141 struct rhash_head ht_node
;
142 struct mlxsw_sp_vr
*vr
;
143 struct mlxsw_sp_fib_key key
;
146 struct mlxsw_sp_fib_entry_params
{
153 struct mlxsw_sp_fib_entry
{
154 struct list_head list
;
155 struct mlxsw_sp_fib_node
*fib_node
;
156 enum mlxsw_sp_fib_entry_type type
;
157 struct list_head nexthop_group_node
;
158 struct mlxsw_sp_nexthop_group
*nh_group
;
159 struct mlxsw_sp_fib_entry_params params
;
163 struct mlxsw_sp_fib
{
164 struct rhashtable ht
;
165 struct list_head node_list
;
166 unsigned long prefix_ref_count
[MLXSW_SP_PREFIX_COUNT
];
167 struct mlxsw_sp_prefix_usage prefix_usage
;
170 static const struct rhashtable_params mlxsw_sp_fib_ht_params
;
172 static struct mlxsw_sp_fib
*mlxsw_sp_fib_create(void)
174 struct mlxsw_sp_fib
*fib
;
177 fib
= kzalloc(sizeof(*fib
), GFP_KERNEL
);
179 return ERR_PTR(-ENOMEM
);
180 err
= rhashtable_init(&fib
->ht
, &mlxsw_sp_fib_ht_params
);
182 goto err_rhashtable_init
;
183 INIT_LIST_HEAD(&fib
->node_list
);
191 static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib
*fib
)
193 WARN_ON(!list_empty(&fib
->node_list
));
194 rhashtable_destroy(&fib
->ht
);
198 static struct mlxsw_sp_lpm_tree
*
199 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp
*mlxsw_sp
)
201 static struct mlxsw_sp_lpm_tree
*lpm_tree
;
204 for (i
= 0; i
< MLXSW_SP_LPM_TREE_COUNT
; i
++) {
205 lpm_tree
= &mlxsw_sp
->router
.lpm_trees
[i
];
206 if (lpm_tree
->ref_count
== 0)
212 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp
*mlxsw_sp
,
213 struct mlxsw_sp_lpm_tree
*lpm_tree
)
215 char ralta_pl
[MLXSW_REG_RALTA_LEN
];
217 mlxsw_reg_ralta_pack(ralta_pl
, true,
218 (enum mlxsw_reg_ralxx_protocol
) lpm_tree
->proto
,
220 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralta
), ralta_pl
);
223 static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp
*mlxsw_sp
,
224 struct mlxsw_sp_lpm_tree
*lpm_tree
)
226 char ralta_pl
[MLXSW_REG_RALTA_LEN
];
228 mlxsw_reg_ralta_pack(ralta_pl
, false,
229 (enum mlxsw_reg_ralxx_protocol
) lpm_tree
->proto
,
231 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralta
), ralta_pl
);
235 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp
*mlxsw_sp
,
236 struct mlxsw_sp_prefix_usage
*prefix_usage
,
237 struct mlxsw_sp_lpm_tree
*lpm_tree
)
239 char ralst_pl
[MLXSW_REG_RALST_LEN
];
242 u8 last_prefix
= MLXSW_REG_RALST_BIN_NO_CHILD
;
244 mlxsw_sp_prefix_usage_for_each(prefix
, prefix_usage
)
247 mlxsw_reg_ralst_pack(ralst_pl
, root_bin
, lpm_tree
->id
);
248 mlxsw_sp_prefix_usage_for_each(prefix
, prefix_usage
) {
251 mlxsw_reg_ralst_bin_pack(ralst_pl
, prefix
, last_prefix
,
252 MLXSW_REG_RALST_BIN_NO_CHILD
);
253 last_prefix
= prefix
;
255 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralst
), ralst_pl
);
258 static struct mlxsw_sp_lpm_tree
*
259 mlxsw_sp_lpm_tree_create(struct mlxsw_sp
*mlxsw_sp
,
260 struct mlxsw_sp_prefix_usage
*prefix_usage
,
261 enum mlxsw_sp_l3proto proto
)
263 struct mlxsw_sp_lpm_tree
*lpm_tree
;
266 lpm_tree
= mlxsw_sp_lpm_tree_find_unused(mlxsw_sp
);
268 return ERR_PTR(-EBUSY
);
269 lpm_tree
->proto
= proto
;
270 err
= mlxsw_sp_lpm_tree_alloc(mlxsw_sp
, lpm_tree
);
274 err
= mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp
, prefix_usage
,
277 goto err_left_struct_set
;
278 memcpy(&lpm_tree
->prefix_usage
, prefix_usage
,
279 sizeof(lpm_tree
->prefix_usage
));
283 mlxsw_sp_lpm_tree_free(mlxsw_sp
, lpm_tree
);
287 static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp
*mlxsw_sp
,
288 struct mlxsw_sp_lpm_tree
*lpm_tree
)
290 return mlxsw_sp_lpm_tree_free(mlxsw_sp
, lpm_tree
);
293 static struct mlxsw_sp_lpm_tree
*
294 mlxsw_sp_lpm_tree_get(struct mlxsw_sp
*mlxsw_sp
,
295 struct mlxsw_sp_prefix_usage
*prefix_usage
,
296 enum mlxsw_sp_l3proto proto
)
298 struct mlxsw_sp_lpm_tree
*lpm_tree
;
301 for (i
= 0; i
< MLXSW_SP_LPM_TREE_COUNT
; i
++) {
302 lpm_tree
= &mlxsw_sp
->router
.lpm_trees
[i
];
303 if (lpm_tree
->ref_count
!= 0 &&
304 lpm_tree
->proto
== proto
&&
305 mlxsw_sp_prefix_usage_eq(&lpm_tree
->prefix_usage
,
309 lpm_tree
= mlxsw_sp_lpm_tree_create(mlxsw_sp
, prefix_usage
,
311 if (IS_ERR(lpm_tree
))
315 lpm_tree
->ref_count
++;
319 static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp
*mlxsw_sp
,
320 struct mlxsw_sp_lpm_tree
*lpm_tree
)
322 if (--lpm_tree
->ref_count
== 0)
323 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp
, lpm_tree
);
327 static void mlxsw_sp_lpm_init(struct mlxsw_sp
*mlxsw_sp
)
329 struct mlxsw_sp_lpm_tree
*lpm_tree
;
332 for (i
= 0; i
< MLXSW_SP_LPM_TREE_COUNT
; i
++) {
333 lpm_tree
= &mlxsw_sp
->router
.lpm_trees
[i
];
334 lpm_tree
->id
= i
+ MLXSW_SP_LPM_TREE_MIN
;
338 static struct mlxsw_sp_vr
*mlxsw_sp_vr_find_unused(struct mlxsw_sp
*mlxsw_sp
)
340 struct mlxsw_sp_vr
*vr
;
343 for (i
= 0; i
< MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_VRS
); i
++) {
344 vr
= &mlxsw_sp
->router
.vrs
[i
];
351 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp
*mlxsw_sp
,
352 struct mlxsw_sp_vr
*vr
)
354 char raltb_pl
[MLXSW_REG_RALTB_LEN
];
356 mlxsw_reg_raltb_pack(raltb_pl
, vr
->id
,
357 (enum mlxsw_reg_ralxx_protocol
) vr
->proto
,
359 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(raltb
), raltb_pl
);
362 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp
*mlxsw_sp
,
363 struct mlxsw_sp_vr
*vr
)
365 char raltb_pl
[MLXSW_REG_RALTB_LEN
];
367 /* Bind to tree 0 which is default */
368 mlxsw_reg_raltb_pack(raltb_pl
, vr
->id
,
369 (enum mlxsw_reg_ralxx_protocol
) vr
->proto
, 0);
370 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(raltb
), raltb_pl
);
373 static u32
mlxsw_sp_fix_tb_id(u32 tb_id
)
375 /* For our purpose, squash main and local table into one */
376 if (tb_id
== RT_TABLE_LOCAL
)
377 tb_id
= RT_TABLE_MAIN
;
381 static struct mlxsw_sp_vr
*mlxsw_sp_vr_find(struct mlxsw_sp
*mlxsw_sp
,
383 enum mlxsw_sp_l3proto proto
)
385 struct mlxsw_sp_vr
*vr
;
388 tb_id
= mlxsw_sp_fix_tb_id(tb_id
);
390 for (i
= 0; i
< MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_VRS
); i
++) {
391 vr
= &mlxsw_sp
->router
.vrs
[i
];
392 if (vr
->used
&& vr
->proto
== proto
&& vr
->tb_id
== tb_id
)
398 static struct mlxsw_sp_vr
*mlxsw_sp_vr_create(struct mlxsw_sp
*mlxsw_sp
,
399 unsigned char prefix_len
,
401 enum mlxsw_sp_l3proto proto
)
403 struct mlxsw_sp_prefix_usage req_prefix_usage
;
404 struct mlxsw_sp_lpm_tree
*lpm_tree
;
405 struct mlxsw_sp_vr
*vr
;
408 vr
= mlxsw_sp_vr_find_unused(mlxsw_sp
);
410 return ERR_PTR(-EBUSY
);
411 vr
->fib
= mlxsw_sp_fib_create();
413 return ERR_CAST(vr
->fib
);
417 mlxsw_sp_prefix_usage_zero(&req_prefix_usage
);
418 mlxsw_sp_prefix_usage_set(&req_prefix_usage
, prefix_len
);
419 lpm_tree
= mlxsw_sp_lpm_tree_get(mlxsw_sp
, &req_prefix_usage
,
421 if (IS_ERR(lpm_tree
)) {
422 err
= PTR_ERR(lpm_tree
);
425 vr
->lpm_tree
= lpm_tree
;
426 err
= mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp
, vr
);
434 mlxsw_sp_lpm_tree_put(mlxsw_sp
, vr
->lpm_tree
);
436 mlxsw_sp_fib_destroy(vr
->fib
);
441 static void mlxsw_sp_vr_destroy(struct mlxsw_sp
*mlxsw_sp
,
442 struct mlxsw_sp_vr
*vr
)
444 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp
, vr
);
445 mlxsw_sp_lpm_tree_put(mlxsw_sp
, vr
->lpm_tree
);
446 mlxsw_sp_fib_destroy(vr
->fib
);
451 mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp
*mlxsw_sp
, struct mlxsw_sp_vr
*vr
,
452 struct mlxsw_sp_prefix_usage
*req_prefix_usage
)
454 struct mlxsw_sp_lpm_tree
*lpm_tree
= vr
->lpm_tree
;
455 struct mlxsw_sp_lpm_tree
*new_tree
;
458 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage
, &lpm_tree
->prefix_usage
))
461 new_tree
= mlxsw_sp_lpm_tree_get(mlxsw_sp
, req_prefix_usage
,
463 if (IS_ERR(new_tree
)) {
464 /* We failed to get a tree according to the required
465 * prefix usage. However, the current tree might be still good
466 * for us if our requirement is subset of the prefixes used
469 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage
,
470 &lpm_tree
->prefix_usage
))
472 return PTR_ERR(new_tree
);
475 /* Prevent packet loss by overwriting existing binding */
476 vr
->lpm_tree
= new_tree
;
477 err
= mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp
, vr
);
480 mlxsw_sp_lpm_tree_put(mlxsw_sp
, lpm_tree
);
485 vr
->lpm_tree
= lpm_tree
;
486 mlxsw_sp_lpm_tree_put(mlxsw_sp
, new_tree
);
490 static struct mlxsw_sp_vr
*mlxsw_sp_vr_get(struct mlxsw_sp
*mlxsw_sp
,
491 unsigned char prefix_len
,
493 enum mlxsw_sp_l3proto proto
)
495 struct mlxsw_sp_vr
*vr
;
498 tb_id
= mlxsw_sp_fix_tb_id(tb_id
);
499 vr
= mlxsw_sp_vr_find(mlxsw_sp
, tb_id
, proto
);
501 vr
= mlxsw_sp_vr_create(mlxsw_sp
, prefix_len
, tb_id
, proto
);
505 struct mlxsw_sp_prefix_usage req_prefix_usage
;
507 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage
,
508 &vr
->fib
->prefix_usage
);
509 mlxsw_sp_prefix_usage_set(&req_prefix_usage
, prefix_len
);
510 /* Need to replace LPM tree in case new prefix is required. */
511 err
= mlxsw_sp_vr_lpm_tree_check(mlxsw_sp
, vr
,
519 static void mlxsw_sp_vr_put(struct mlxsw_sp
*mlxsw_sp
, struct mlxsw_sp_vr
*vr
)
521 /* Destroy virtual router entity in case the associated FIB is empty
522 * and allow it to be used for other tables in future. Otherwise,
523 * check if some prefix usage did not disappear and change tree if
524 * that is the case. Note that in case new, smaller tree cannot be
525 * allocated, the original one will be kept being used.
527 if (mlxsw_sp_prefix_usage_none(&vr
->fib
->prefix_usage
))
528 mlxsw_sp_vr_destroy(mlxsw_sp
, vr
);
530 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp
, vr
,
531 &vr
->fib
->prefix_usage
);
534 static int mlxsw_sp_vrs_init(struct mlxsw_sp
*mlxsw_sp
)
536 struct mlxsw_sp_vr
*vr
;
540 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_VRS
))
543 max_vrs
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_VRS
);
544 mlxsw_sp
->router
.vrs
= kcalloc(max_vrs
, sizeof(struct mlxsw_sp_vr
),
546 if (!mlxsw_sp
->router
.vrs
)
549 for (i
= 0; i
< max_vrs
; i
++) {
550 vr
= &mlxsw_sp
->router
.vrs
[i
];
557 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp
*mlxsw_sp
);
559 static void mlxsw_sp_vrs_fini(struct mlxsw_sp
*mlxsw_sp
)
561 /* At this stage we're guaranteed not to have new incoming
562 * FIB notifications and the work queue is free from FIBs
563 * sitting on top of mlxsw netdevs. However, we can still
564 * have other FIBs queued. Flush the queue before flushing
565 * the device's tables. No need for locks, as we're the only
568 mlxsw_core_flush_owq();
569 mlxsw_sp_router_fib_flush(mlxsw_sp
);
570 kfree(mlxsw_sp
->router
.vrs
);
573 struct mlxsw_sp_neigh_key
{
577 struct mlxsw_sp_neigh_entry
{
578 struct list_head rif_list_node
;
579 struct rhash_head ht_node
;
580 struct mlxsw_sp_neigh_key key
;
583 unsigned char ha
[ETH_ALEN
];
584 struct list_head nexthop_list
; /* list of nexthops using
587 struct list_head nexthop_neighs_list_node
;
590 static const struct rhashtable_params mlxsw_sp_neigh_ht_params
= {
591 .key_offset
= offsetof(struct mlxsw_sp_neigh_entry
, key
),
592 .head_offset
= offsetof(struct mlxsw_sp_neigh_entry
, ht_node
),
593 .key_len
= sizeof(struct mlxsw_sp_neigh_key
),
596 static struct mlxsw_sp_neigh_entry
*
597 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp
*mlxsw_sp
, struct neighbour
*n
,
600 struct mlxsw_sp_neigh_entry
*neigh_entry
;
602 neigh_entry
= kzalloc(sizeof(*neigh_entry
), GFP_KERNEL
);
606 neigh_entry
->key
.n
= n
;
607 neigh_entry
->rif
= rif
;
608 INIT_LIST_HEAD(&neigh_entry
->nexthop_list
);
613 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry
*neigh_entry
)
619 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp
*mlxsw_sp
,
620 struct mlxsw_sp_neigh_entry
*neigh_entry
)
622 return rhashtable_insert_fast(&mlxsw_sp
->router
.neigh_ht
,
623 &neigh_entry
->ht_node
,
624 mlxsw_sp_neigh_ht_params
);
628 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp
*mlxsw_sp
,
629 struct mlxsw_sp_neigh_entry
*neigh_entry
)
631 rhashtable_remove_fast(&mlxsw_sp
->router
.neigh_ht
,
632 &neigh_entry
->ht_node
,
633 mlxsw_sp_neigh_ht_params
);
636 static struct mlxsw_sp_neigh_entry
*
637 mlxsw_sp_neigh_entry_create(struct mlxsw_sp
*mlxsw_sp
, struct neighbour
*n
)
639 struct mlxsw_sp_neigh_entry
*neigh_entry
;
640 struct mlxsw_sp_rif
*r
;
643 r
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, n
->dev
);
645 return ERR_PTR(-EINVAL
);
647 neigh_entry
= mlxsw_sp_neigh_entry_alloc(mlxsw_sp
, n
, r
->rif
);
649 return ERR_PTR(-ENOMEM
);
651 err
= mlxsw_sp_neigh_entry_insert(mlxsw_sp
, neigh_entry
);
653 goto err_neigh_entry_insert
;
655 list_add(&neigh_entry
->rif_list_node
, &r
->neigh_list
);
659 err_neigh_entry_insert
:
660 mlxsw_sp_neigh_entry_free(neigh_entry
);
665 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp
*mlxsw_sp
,
666 struct mlxsw_sp_neigh_entry
*neigh_entry
)
668 list_del(&neigh_entry
->rif_list_node
);
669 mlxsw_sp_neigh_entry_remove(mlxsw_sp
, neigh_entry
);
670 mlxsw_sp_neigh_entry_free(neigh_entry
);
673 static struct mlxsw_sp_neigh_entry
*
674 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp
*mlxsw_sp
, struct neighbour
*n
)
676 struct mlxsw_sp_neigh_key key
;
679 return rhashtable_lookup_fast(&mlxsw_sp
->router
.neigh_ht
,
680 &key
, mlxsw_sp_neigh_ht_params
);
684 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp
*mlxsw_sp
)
686 unsigned long interval
= NEIGH_VAR(&arp_tbl
.parms
, DELAY_PROBE_TIME
);
688 mlxsw_sp
->router
.neighs_update
.interval
= jiffies_to_msecs(interval
);
691 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp
*mlxsw_sp
,
695 struct net_device
*dev
;
701 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl
, ent_index
, &rif
, &dip
);
703 if (!mlxsw_sp
->rifs
[rif
]) {
704 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Incorrect RIF in neighbour entry\n");
709 dev
= mlxsw_sp
->rifs
[rif
]->dev
;
710 n
= neigh_lookup(&arp_tbl
, &dipn
, dev
);
712 netdev_err(dev
, "Failed to find matching neighbour for IP=%pI4h\n",
717 netdev_dbg(dev
, "Updating neighbour with IP=%pI4h\n", &dip
);
718 neigh_event_send(n
, NULL
);
722 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp
*mlxsw_sp
,
729 num_entries
= mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl
,
731 /* Hardware starts counting at 0, so add 1. */
734 /* Each record consists of several neighbour entries. */
735 for (i
= 0; i
< num_entries
; i
++) {
738 ent_index
= rec_index
* MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC
+ i
;
739 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp
, rauhtd_pl
,
745 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp
*mlxsw_sp
,
746 char *rauhtd_pl
, int rec_index
)
748 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl
, rec_index
)) {
749 case MLXSW_REG_RAUHTD_TYPE_IPV4
:
750 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp
, rauhtd_pl
,
753 case MLXSW_REG_RAUHTD_TYPE_IPV6
:
759 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl
)
761 u8 num_rec
, last_rec_index
, num_entries
;
763 num_rec
= mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl
);
764 last_rec_index
= num_rec
- 1;
766 if (num_rec
< MLXSW_REG_RAUHTD_REC_MAX_NUM
)
768 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl
, last_rec_index
) ==
769 MLXSW_REG_RAUHTD_TYPE_IPV6
)
772 num_entries
= mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl
,
774 if (++num_entries
== MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC
)
779 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp
*mlxsw_sp
)
785 rauhtd_pl
= kmalloc(MLXSW_REG_RAUHTD_LEN
, GFP_KERNEL
);
789 /* Make sure the neighbour's netdev isn't removed in the
794 mlxsw_reg_rauhtd_pack(rauhtd_pl
, MLXSW_REG_RAUHTD_TYPE_IPV4
);
795 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(rauhtd
),
798 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Failed to dump neighbour talbe\n");
801 num_rec
= mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl
);
802 for (i
= 0; i
< num_rec
; i
++)
803 mlxsw_sp_router_neigh_rec_process(mlxsw_sp
, rauhtd_pl
,
805 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl
));
812 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp
*mlxsw_sp
)
814 struct mlxsw_sp_neigh_entry
*neigh_entry
;
816 /* Take RTNL mutex here to prevent lists from changes */
818 list_for_each_entry(neigh_entry
, &mlxsw_sp
->router
.nexthop_neighs_list
,
819 nexthop_neighs_list_node
)
820 /* If this neigh have nexthops, make the kernel think this neigh
821 * is active regardless of the traffic.
823 neigh_event_send(neigh_entry
->key
.n
, NULL
);
828 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp
*mlxsw_sp
)
830 unsigned long interval
= mlxsw_sp
->router
.neighs_update
.interval
;
832 mlxsw_core_schedule_dw(&mlxsw_sp
->router
.neighs_update
.dw
,
833 msecs_to_jiffies(interval
));
836 static void mlxsw_sp_router_neighs_update_work(struct work_struct
*work
)
838 struct mlxsw_sp
*mlxsw_sp
= container_of(work
, struct mlxsw_sp
,
839 router
.neighs_update
.dw
.work
);
842 err
= mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp
);
844 dev_err(mlxsw_sp
->bus_info
->dev
, "Could not update kernel for neigh activity");
846 mlxsw_sp_router_neighs_update_nh(mlxsw_sp
);
848 mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp
);
851 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct
*work
)
853 struct mlxsw_sp_neigh_entry
*neigh_entry
;
854 struct mlxsw_sp
*mlxsw_sp
= container_of(work
, struct mlxsw_sp
,
855 router
.nexthop_probe_dw
.work
);
857 /* Iterate over nexthop neighbours, find those who are unresolved and
858 * send arp on them. This solves the chicken-egg problem when
859 * the nexthop wouldn't get offloaded until the neighbor is resolved
860 * but it wouldn't get resolved ever in case traffic is flowing in HW
861 * using different nexthop.
863 * Take RTNL mutex here to prevent lists from changes.
866 list_for_each_entry(neigh_entry
, &mlxsw_sp
->router
.nexthop_neighs_list
,
867 nexthop_neighs_list_node
)
868 if (!neigh_entry
->connected
)
869 neigh_event_send(neigh_entry
->key
.n
, NULL
);
872 mlxsw_core_schedule_dw(&mlxsw_sp
->router
.nexthop_probe_dw
,
873 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL
);
877 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp
*mlxsw_sp
,
878 struct mlxsw_sp_neigh_entry
*neigh_entry
,
881 static enum mlxsw_reg_rauht_op
mlxsw_sp_rauht_op(bool adding
)
883 return adding
? MLXSW_REG_RAUHT_OP_WRITE_ADD
:
884 MLXSW_REG_RAUHT_OP_WRITE_DELETE
;
888 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp
*mlxsw_sp
,
889 struct mlxsw_sp_neigh_entry
*neigh_entry
,
890 enum mlxsw_reg_rauht_op op
)
892 struct neighbour
*n
= neigh_entry
->key
.n
;
893 u32 dip
= ntohl(*((__be32
*) n
->primary_key
));
894 char rauht_pl
[MLXSW_REG_RAUHT_LEN
];
896 mlxsw_reg_rauht_pack4(rauht_pl
, op
, neigh_entry
->rif
, neigh_entry
->ha
,
898 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(rauht
), rauht_pl
);
902 mlxsw_sp_neigh_entry_update(struct mlxsw_sp
*mlxsw_sp
,
903 struct mlxsw_sp_neigh_entry
*neigh_entry
,
906 if (!adding
&& !neigh_entry
->connected
)
908 neigh_entry
->connected
= adding
;
909 if (neigh_entry
->key
.n
->tbl
== &arp_tbl
)
910 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp
, neigh_entry
,
911 mlxsw_sp_rauht_op(adding
));
916 struct mlxsw_sp_neigh_event_work
{
917 struct work_struct work
;
918 struct mlxsw_sp
*mlxsw_sp
;
922 static void mlxsw_sp_router_neigh_event_work(struct work_struct
*work
)
924 struct mlxsw_sp_neigh_event_work
*neigh_work
=
925 container_of(work
, struct mlxsw_sp_neigh_event_work
, work
);
926 struct mlxsw_sp
*mlxsw_sp
= neigh_work
->mlxsw_sp
;
927 struct mlxsw_sp_neigh_entry
*neigh_entry
;
928 struct neighbour
*n
= neigh_work
->n
;
929 unsigned char ha
[ETH_ALEN
];
930 bool entry_connected
;
933 /* If these parameters are changed after we release the lock,
934 * then we are guaranteed to receive another event letting us
937 read_lock_bh(&n
->lock
);
938 memcpy(ha
, n
->ha
, ETH_ALEN
);
939 nud_state
= n
->nud_state
;
941 read_unlock_bh(&n
->lock
);
944 entry_connected
= nud_state
& NUD_VALID
&& !dead
;
945 neigh_entry
= mlxsw_sp_neigh_entry_lookup(mlxsw_sp
, n
);
946 if (!entry_connected
&& !neigh_entry
)
949 neigh_entry
= mlxsw_sp_neigh_entry_create(mlxsw_sp
, n
);
950 if (IS_ERR(neigh_entry
))
954 memcpy(neigh_entry
->ha
, ha
, ETH_ALEN
);
955 mlxsw_sp_neigh_entry_update(mlxsw_sp
, neigh_entry
, entry_connected
);
956 mlxsw_sp_nexthop_neigh_update(mlxsw_sp
, neigh_entry
, !entry_connected
);
958 if (!neigh_entry
->connected
&& list_empty(&neigh_entry
->nexthop_list
))
959 mlxsw_sp_neigh_entry_destroy(mlxsw_sp
, neigh_entry
);
967 int mlxsw_sp_router_netevent_event(struct notifier_block
*unused
,
968 unsigned long event
, void *ptr
)
970 struct mlxsw_sp_neigh_event_work
*neigh_work
;
971 struct mlxsw_sp_port
*mlxsw_sp_port
;
972 struct mlxsw_sp
*mlxsw_sp
;
973 unsigned long interval
;
974 struct neigh_parms
*p
;
978 case NETEVENT_DELAY_PROBE_TIME_UPDATE
:
981 /* We don't care about changes in the default table. */
982 if (!p
->dev
|| p
->tbl
!= &arp_tbl
)
985 /* We are in atomic context and can't take RTNL mutex,
986 * so use RCU variant to walk the device chain.
988 mlxsw_sp_port
= mlxsw_sp_port_lower_dev_hold(p
->dev
);
992 mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
993 interval
= jiffies_to_msecs(NEIGH_VAR(p
, DELAY_PROBE_TIME
));
994 mlxsw_sp
->router
.neighs_update
.interval
= interval
;
996 mlxsw_sp_port_dev_put(mlxsw_sp_port
);
998 case NETEVENT_NEIGH_UPDATE
:
1001 if (n
->tbl
!= &arp_tbl
)
1004 mlxsw_sp_port
= mlxsw_sp_port_lower_dev_hold(n
->dev
);
1008 neigh_work
= kzalloc(sizeof(*neigh_work
), GFP_ATOMIC
);
1010 mlxsw_sp_port_dev_put(mlxsw_sp_port
);
1014 INIT_WORK(&neigh_work
->work
, mlxsw_sp_router_neigh_event_work
);
1015 neigh_work
->mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1018 /* Take a reference to ensure the neighbour won't be
1019 * destructed until we drop the reference in delayed
1023 mlxsw_core_schedule_work(&neigh_work
->work
);
1024 mlxsw_sp_port_dev_put(mlxsw_sp_port
);
1031 static int mlxsw_sp_neigh_init(struct mlxsw_sp
*mlxsw_sp
)
1035 err
= rhashtable_init(&mlxsw_sp
->router
.neigh_ht
,
1036 &mlxsw_sp_neigh_ht_params
);
1040 /* Initialize the polling interval according to the default
1043 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp
);
1045 /* Create the delayed works for the activity_update */
1046 INIT_DELAYED_WORK(&mlxsw_sp
->router
.neighs_update
.dw
,
1047 mlxsw_sp_router_neighs_update_work
);
1048 INIT_DELAYED_WORK(&mlxsw_sp
->router
.nexthop_probe_dw
,
1049 mlxsw_sp_router_probe_unresolved_nexthops
);
1050 mlxsw_core_schedule_dw(&mlxsw_sp
->router
.neighs_update
.dw
, 0);
1051 mlxsw_core_schedule_dw(&mlxsw_sp
->router
.nexthop_probe_dw
, 0);
1055 static void mlxsw_sp_neigh_fini(struct mlxsw_sp
*mlxsw_sp
)
1057 cancel_delayed_work_sync(&mlxsw_sp
->router
.neighs_update
.dw
);
1058 cancel_delayed_work_sync(&mlxsw_sp
->router
.nexthop_probe_dw
);
1059 rhashtable_destroy(&mlxsw_sp
->router
.neigh_ht
);
1062 static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp
*mlxsw_sp
,
1063 const struct mlxsw_sp_rif
*r
)
1065 char rauht_pl
[MLXSW_REG_RAUHT_LEN
];
1067 mlxsw_reg_rauht_pack(rauht_pl
, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL
,
1069 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(rauht
), rauht_pl
);
1072 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp
*mlxsw_sp
,
1073 struct mlxsw_sp_rif
*r
)
1075 struct mlxsw_sp_neigh_entry
*neigh_entry
, *tmp
;
1077 mlxsw_sp_neigh_rif_flush(mlxsw_sp
, r
);
1078 list_for_each_entry_safe(neigh_entry
, tmp
, &r
->neigh_list
,
1080 mlxsw_sp_neigh_entry_destroy(mlxsw_sp
, neigh_entry
);
1083 struct mlxsw_sp_nexthop_key
{
1084 struct fib_nh
*fib_nh
;
1087 struct mlxsw_sp_nexthop
{
1088 struct list_head neigh_list_node
; /* member of neigh entry list */
1089 struct list_head rif_list_node
;
1090 struct mlxsw_sp_nexthop_group
*nh_grp
; /* pointer back to the group
1093 struct rhash_head ht_node
;
1094 struct mlxsw_sp_nexthop_key key
;
1095 struct mlxsw_sp_rif
*r
;
1096 u8 should_offload
:1, /* set indicates this neigh is connected and
1097 * should be put to KVD linear area of this group.
1099 offloaded
:1, /* set in case the neigh is actually put into
1100 * KVD linear area of this group.
1102 update
:1; /* set indicates that MAC of this neigh should be
1105 struct mlxsw_sp_neigh_entry
*neigh_entry
;
1108 struct mlxsw_sp_nexthop_group_key
{
1109 struct fib_info
*fi
;
1112 struct mlxsw_sp_nexthop_group
{
1113 struct rhash_head ht_node
;
1114 struct list_head fib_list
; /* list of fib entries that use this group */
1115 struct mlxsw_sp_nexthop_group_key key
;
1116 u8 adj_index_valid
:1,
1117 gateway
:1; /* routes using the group use a gateway */
1121 struct mlxsw_sp_nexthop nexthops
[0];
1122 #define nh_rif nexthops[0].r
1125 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params
= {
1126 .key_offset
= offsetof(struct mlxsw_sp_nexthop_group
, key
),
1127 .head_offset
= offsetof(struct mlxsw_sp_nexthop_group
, ht_node
),
1128 .key_len
= sizeof(struct mlxsw_sp_nexthop_group_key
),
1131 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp
*mlxsw_sp
,
1132 struct mlxsw_sp_nexthop_group
*nh_grp
)
1134 return rhashtable_insert_fast(&mlxsw_sp
->router
.nexthop_group_ht
,
1136 mlxsw_sp_nexthop_group_ht_params
);
1139 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp
*mlxsw_sp
,
1140 struct mlxsw_sp_nexthop_group
*nh_grp
)
1142 rhashtable_remove_fast(&mlxsw_sp
->router
.nexthop_group_ht
,
1144 mlxsw_sp_nexthop_group_ht_params
);
1147 static struct mlxsw_sp_nexthop_group
*
1148 mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp
*mlxsw_sp
,
1149 struct mlxsw_sp_nexthop_group_key key
)
1151 return rhashtable_lookup_fast(&mlxsw_sp
->router
.nexthop_group_ht
, &key
,
1152 mlxsw_sp_nexthop_group_ht_params
);
1155 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params
= {
1156 .key_offset
= offsetof(struct mlxsw_sp_nexthop
, key
),
1157 .head_offset
= offsetof(struct mlxsw_sp_nexthop
, ht_node
),
1158 .key_len
= sizeof(struct mlxsw_sp_nexthop_key
),
1161 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp
*mlxsw_sp
,
1162 struct mlxsw_sp_nexthop
*nh
)
1164 return rhashtable_insert_fast(&mlxsw_sp
->router
.nexthop_ht
,
1165 &nh
->ht_node
, mlxsw_sp_nexthop_ht_params
);
1168 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp
*mlxsw_sp
,
1169 struct mlxsw_sp_nexthop
*nh
)
1171 rhashtable_remove_fast(&mlxsw_sp
->router
.nexthop_ht
, &nh
->ht_node
,
1172 mlxsw_sp_nexthop_ht_params
);
1175 static struct mlxsw_sp_nexthop
*
1176 mlxsw_sp_nexthop_lookup(struct mlxsw_sp
*mlxsw_sp
,
1177 struct mlxsw_sp_nexthop_key key
)
1179 return rhashtable_lookup_fast(&mlxsw_sp
->router
.nexthop_ht
, &key
,
1180 mlxsw_sp_nexthop_ht_params
);
1183 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp
*mlxsw_sp
,
1184 struct mlxsw_sp_vr
*vr
,
1185 u32 adj_index
, u16 ecmp_size
,
1189 char raleu_pl
[MLXSW_REG_RALEU_LEN
];
1191 mlxsw_reg_raleu_pack(raleu_pl
,
1192 (enum mlxsw_reg_ralxx_protocol
) vr
->proto
, vr
->id
,
1193 adj_index
, ecmp_size
, new_adj_index
,
1195 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(raleu
), raleu_pl
);
1198 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp
*mlxsw_sp
,
1199 struct mlxsw_sp_nexthop_group
*nh_grp
,
1200 u32 old_adj_index
, u16 old_ecmp_size
)
1202 struct mlxsw_sp_fib_entry
*fib_entry
;
1203 struct mlxsw_sp_vr
*vr
= NULL
;
1206 list_for_each_entry(fib_entry
, &nh_grp
->fib_list
, nexthop_group_node
) {
1207 if (vr
== fib_entry
->fib_node
->vr
)
1209 vr
= fib_entry
->fib_node
->vr
;
1210 err
= mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp
, vr
,
1221 static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp
*mlxsw_sp
, u32 adj_index
,
1222 struct mlxsw_sp_nexthop
*nh
)
1224 struct mlxsw_sp_neigh_entry
*neigh_entry
= nh
->neigh_entry
;
1225 char ratr_pl
[MLXSW_REG_RATR_LEN
];
1227 mlxsw_reg_ratr_pack(ratr_pl
, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY
,
1228 true, adj_index
, neigh_entry
->rif
);
1229 mlxsw_reg_ratr_eth_entry_pack(ratr_pl
, neigh_entry
->ha
);
1230 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ratr
), ratr_pl
);
1234 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp
*mlxsw_sp
,
1235 struct mlxsw_sp_nexthop_group
*nh_grp
,
1238 u32 adj_index
= nh_grp
->adj_index
; /* base */
1239 struct mlxsw_sp_nexthop
*nh
;
1243 for (i
= 0; i
< nh_grp
->count
; i
++) {
1244 nh
= &nh_grp
->nexthops
[i
];
1246 if (!nh
->should_offload
) {
1251 if (nh
->update
|| reallocate
) {
1252 err
= mlxsw_sp_nexthop_mac_update(mlxsw_sp
,
1264 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp
*mlxsw_sp
,
1265 struct mlxsw_sp_fib_entry
*fib_entry
);
1268 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp
*mlxsw_sp
,
1269 struct mlxsw_sp_nexthop_group
*nh_grp
)
1271 struct mlxsw_sp_fib_entry
*fib_entry
;
1274 list_for_each_entry(fib_entry
, &nh_grp
->fib_list
, nexthop_group_node
) {
1275 err
= mlxsw_sp_fib_entry_update(mlxsw_sp
, fib_entry
);
1283 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp
*mlxsw_sp
,
1284 struct mlxsw_sp_nexthop_group
*nh_grp
)
1286 struct mlxsw_sp_nexthop
*nh
;
1287 bool offload_change
= false;
1290 bool old_adj_index_valid
;
1297 if (!nh_grp
->gateway
) {
1298 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp
, nh_grp
);
1302 for (i
= 0; i
< nh_grp
->count
; i
++) {
1303 nh
= &nh_grp
->nexthops
[i
];
1305 if (nh
->should_offload
^ nh
->offloaded
) {
1306 offload_change
= true;
1307 if (nh
->should_offload
)
1310 if (nh
->should_offload
)
1313 if (!offload_change
) {
1314 /* Nothing was added or removed, so no need to reallocate. Just
1315 * update MAC on existing adjacency indexes.
1317 err
= mlxsw_sp_nexthop_group_mac_update(mlxsw_sp
, nh_grp
,
1320 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to update neigh MAC in adjacency table.\n");
1326 /* No neigh of this group is connected so we just set
1327 * the trap and let everthing flow through kernel.
1331 ret
= mlxsw_sp_kvdl_alloc(mlxsw_sp
, ecmp_size
);
1333 /* We ran out of KVD linear space, just set the
1334 * trap and let everything flow through kernel.
1336 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to allocate KVD linear area for nexthop group.\n");
1340 old_adj_index_valid
= nh_grp
->adj_index_valid
;
1341 old_adj_index
= nh_grp
->adj_index
;
1342 old_ecmp_size
= nh_grp
->ecmp_size
;
1343 nh_grp
->adj_index_valid
= 1;
1344 nh_grp
->adj_index
= adj_index
;
1345 nh_grp
->ecmp_size
= ecmp_size
;
1346 err
= mlxsw_sp_nexthop_group_mac_update(mlxsw_sp
, nh_grp
, true);
1348 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to update neigh MAC in adjacency table.\n");
1352 if (!old_adj_index_valid
) {
1353 /* The trap was set for fib entries, so we have to call
1354 * fib entry update to unset it and use adjacency index.
1356 err
= mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp
, nh_grp
);
1358 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to add adjacency index to fib entries.\n");
1364 err
= mlxsw_sp_adj_index_mass_update(mlxsw_sp
, nh_grp
,
1365 old_adj_index
, old_ecmp_size
);
1366 mlxsw_sp_kvdl_free(mlxsw_sp
, old_adj_index
);
1368 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to mass-update adjacency index for nexthop group.\n");
1374 old_adj_index_valid
= nh_grp
->adj_index_valid
;
1375 nh_grp
->adj_index_valid
= 0;
1376 for (i
= 0; i
< nh_grp
->count
; i
++) {
1377 nh
= &nh_grp
->nexthops
[i
];
1380 err
= mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp
, nh_grp
);
1382 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to set traps for fib entries.\n");
1383 if (old_adj_index_valid
)
1384 mlxsw_sp_kvdl_free(mlxsw_sp
, nh_grp
->adj_index
);
1387 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop
*nh
,
1390 if (!removing
&& !nh
->should_offload
)
1391 nh
->should_offload
= 1;
1392 else if (removing
&& nh
->offloaded
)
1393 nh
->should_offload
= 0;
1398 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp
*mlxsw_sp
,
1399 struct mlxsw_sp_neigh_entry
*neigh_entry
,
1402 struct mlxsw_sp_nexthop
*nh
;
1404 list_for_each_entry(nh
, &neigh_entry
->nexthop_list
,
1406 __mlxsw_sp_nexthop_neigh_update(nh
, removing
);
1407 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh
->nh_grp
);
1411 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop
*nh
,
1412 struct mlxsw_sp_rif
*r
)
1418 list_add(&nh
->rif_list_node
, &r
->nexthop_list
);
1421 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop
*nh
)
1426 list_del(&nh
->rif_list_node
);
1430 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp
*mlxsw_sp
,
1431 struct mlxsw_sp_nexthop
*nh
)
1433 struct mlxsw_sp_neigh_entry
*neigh_entry
;
1434 struct fib_nh
*fib_nh
= nh
->key
.fib_nh
;
1435 struct neighbour
*n
;
1439 if (!nh
->nh_grp
->gateway
|| nh
->neigh_entry
)
1442 /* Take a reference of neigh here ensuring that neigh would
1443 * not be detructed before the nexthop entry is finished.
1444 * The reference is taken either in neigh_lookup() or
1445 * in neigh_create() in case n is not found.
1447 n
= neigh_lookup(&arp_tbl
, &fib_nh
->nh_gw
, fib_nh
->nh_dev
);
1449 n
= neigh_create(&arp_tbl
, &fib_nh
->nh_gw
, fib_nh
->nh_dev
);
1452 neigh_event_send(n
, NULL
);
1454 neigh_entry
= mlxsw_sp_neigh_entry_lookup(mlxsw_sp
, n
);
1456 neigh_entry
= mlxsw_sp_neigh_entry_create(mlxsw_sp
, n
);
1457 if (IS_ERR(neigh_entry
)) {
1459 goto err_neigh_entry_create
;
1463 /* If that is the first nexthop connected to that neigh, add to
1464 * nexthop_neighs_list
1466 if (list_empty(&neigh_entry
->nexthop_list
))
1467 list_add_tail(&neigh_entry
->nexthop_neighs_list_node
,
1468 &mlxsw_sp
->router
.nexthop_neighs_list
);
1470 nh
->neigh_entry
= neigh_entry
;
1471 list_add_tail(&nh
->neigh_list_node
, &neigh_entry
->nexthop_list
);
1472 read_lock_bh(&n
->lock
);
1473 nud_state
= n
->nud_state
;
1475 read_unlock_bh(&n
->lock
);
1476 __mlxsw_sp_nexthop_neigh_update(nh
, !(nud_state
& NUD_VALID
&& !dead
));
1480 err_neigh_entry_create
:
1485 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp
*mlxsw_sp
,
1486 struct mlxsw_sp_nexthop
*nh
)
1488 struct mlxsw_sp_neigh_entry
*neigh_entry
= nh
->neigh_entry
;
1489 struct neighbour
*n
;
1493 n
= neigh_entry
->key
.n
;
1495 __mlxsw_sp_nexthop_neigh_update(nh
, true);
1496 list_del(&nh
->neigh_list_node
);
1497 nh
->neigh_entry
= NULL
;
1499 /* If that is the last nexthop connected to that neigh, remove from
1500 * nexthop_neighs_list
1502 if (list_empty(&neigh_entry
->nexthop_list
))
1503 list_del(&neigh_entry
->nexthop_neighs_list_node
);
1505 if (!neigh_entry
->connected
&& list_empty(&neigh_entry
->nexthop_list
))
1506 mlxsw_sp_neigh_entry_destroy(mlxsw_sp
, neigh_entry
);
1511 static int mlxsw_sp_nexthop_init(struct mlxsw_sp
*mlxsw_sp
,
1512 struct mlxsw_sp_nexthop_group
*nh_grp
,
1513 struct mlxsw_sp_nexthop
*nh
,
1514 struct fib_nh
*fib_nh
)
1516 struct net_device
*dev
= fib_nh
->nh_dev
;
1517 struct in_device
*in_dev
;
1518 struct mlxsw_sp_rif
*r
;
1521 nh
->nh_grp
= nh_grp
;
1522 nh
->key
.fib_nh
= fib_nh
;
1523 err
= mlxsw_sp_nexthop_insert(mlxsw_sp
, nh
);
1530 in_dev
= __in_dev_get_rtnl(dev
);
1531 if (in_dev
&& IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev
) &&
1532 fib_nh
->nh_flags
& RTNH_F_LINKDOWN
)
1535 r
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, dev
);
1538 mlxsw_sp_nexthop_rif_init(nh
, r
);
1540 err
= mlxsw_sp_nexthop_neigh_init(mlxsw_sp
, nh
);
1542 goto err_nexthop_neigh_init
;
1546 err_nexthop_neigh_init
:
1547 mlxsw_sp_nexthop_remove(mlxsw_sp
, nh
);
1551 static void mlxsw_sp_nexthop_fini(struct mlxsw_sp
*mlxsw_sp
,
1552 struct mlxsw_sp_nexthop
*nh
)
1554 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp
, nh
);
1555 mlxsw_sp_nexthop_rif_fini(nh
);
1556 mlxsw_sp_nexthop_remove(mlxsw_sp
, nh
);
1559 static void mlxsw_sp_nexthop_event(struct mlxsw_sp
*mlxsw_sp
,
1560 unsigned long event
, struct fib_nh
*fib_nh
)
1562 struct mlxsw_sp_nexthop_key key
;
1563 struct mlxsw_sp_nexthop
*nh
;
1564 struct mlxsw_sp_rif
*r
;
1566 if (mlxsw_sp
->router
.aborted
)
1569 key
.fib_nh
= fib_nh
;
1570 nh
= mlxsw_sp_nexthop_lookup(mlxsw_sp
, key
);
1571 if (WARN_ON_ONCE(!nh
))
1574 r
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, fib_nh
->nh_dev
);
1579 case FIB_EVENT_NH_ADD
:
1580 mlxsw_sp_nexthop_rif_init(nh
, r
);
1581 mlxsw_sp_nexthop_neigh_init(mlxsw_sp
, nh
);
1583 case FIB_EVENT_NH_DEL
:
1584 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp
, nh
);
1585 mlxsw_sp_nexthop_rif_fini(nh
);
1589 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh
->nh_grp
);
1592 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp
*mlxsw_sp
,
1593 struct mlxsw_sp_rif
*r
)
1595 struct mlxsw_sp_nexthop
*nh
, *tmp
;
1597 list_for_each_entry_safe(nh
, tmp
, &r
->nexthop_list
, rif_list_node
) {
1598 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp
, nh
);
1599 mlxsw_sp_nexthop_rif_fini(nh
);
1600 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh
->nh_grp
);
1604 static struct mlxsw_sp_nexthop_group
*
1605 mlxsw_sp_nexthop_group_create(struct mlxsw_sp
*mlxsw_sp
, struct fib_info
*fi
)
1607 struct mlxsw_sp_nexthop_group
*nh_grp
;
1608 struct mlxsw_sp_nexthop
*nh
;
1609 struct fib_nh
*fib_nh
;
1614 alloc_size
= sizeof(*nh_grp
) +
1615 fi
->fib_nhs
* sizeof(struct mlxsw_sp_nexthop
);
1616 nh_grp
= kzalloc(alloc_size
, GFP_KERNEL
);
1618 return ERR_PTR(-ENOMEM
);
1619 INIT_LIST_HEAD(&nh_grp
->fib_list
);
1620 nh_grp
->gateway
= fi
->fib_nh
->nh_scope
== RT_SCOPE_LINK
;
1621 nh_grp
->count
= fi
->fib_nhs
;
1622 nh_grp
->key
.fi
= fi
;
1623 for (i
= 0; i
< nh_grp
->count
; i
++) {
1624 nh
= &nh_grp
->nexthops
[i
];
1625 fib_nh
= &fi
->fib_nh
[i
];
1626 err
= mlxsw_sp_nexthop_init(mlxsw_sp
, nh_grp
, nh
, fib_nh
);
1628 goto err_nexthop_init
;
1630 err
= mlxsw_sp_nexthop_group_insert(mlxsw_sp
, nh_grp
);
1632 goto err_nexthop_group_insert
;
1633 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh_grp
);
1636 err_nexthop_group_insert
:
1638 for (i
--; i
>= 0; i
--) {
1639 nh
= &nh_grp
->nexthops
[i
];
1640 mlxsw_sp_nexthop_fini(mlxsw_sp
, nh
);
1643 return ERR_PTR(err
);
1647 mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp
*mlxsw_sp
,
1648 struct mlxsw_sp_nexthop_group
*nh_grp
)
1650 struct mlxsw_sp_nexthop
*nh
;
1653 mlxsw_sp_nexthop_group_remove(mlxsw_sp
, nh_grp
);
1654 for (i
= 0; i
< nh_grp
->count
; i
++) {
1655 nh
= &nh_grp
->nexthops
[i
];
1656 mlxsw_sp_nexthop_fini(mlxsw_sp
, nh
);
1658 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh_grp
);
1659 WARN_ON_ONCE(nh_grp
->adj_index_valid
);
1663 static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp
*mlxsw_sp
,
1664 struct mlxsw_sp_fib_entry
*fib_entry
,
1665 struct fib_info
*fi
)
1667 struct mlxsw_sp_nexthop_group_key key
;
1668 struct mlxsw_sp_nexthop_group
*nh_grp
;
1671 nh_grp
= mlxsw_sp_nexthop_group_lookup(mlxsw_sp
, key
);
1673 nh_grp
= mlxsw_sp_nexthop_group_create(mlxsw_sp
, fi
);
1675 return PTR_ERR(nh_grp
);
1677 list_add_tail(&fib_entry
->nexthop_group_node
, &nh_grp
->fib_list
);
1678 fib_entry
->nh_group
= nh_grp
;
1682 static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp
*mlxsw_sp
,
1683 struct mlxsw_sp_fib_entry
*fib_entry
)
1685 struct mlxsw_sp_nexthop_group
*nh_grp
= fib_entry
->nh_group
;
1687 list_del(&fib_entry
->nexthop_group_node
);
1688 if (!list_empty(&nh_grp
->fib_list
))
1690 mlxsw_sp_nexthop_group_destroy(mlxsw_sp
, nh_grp
);
1694 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry
*fib_entry
)
1696 struct mlxsw_sp_nexthop_group
*nh_group
= fib_entry
->nh_group
;
1698 if (fib_entry
->params
.tos
)
1701 switch (fib_entry
->type
) {
1702 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE
:
1703 return !!nh_group
->adj_index_valid
;
1704 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
:
1705 return !!nh_group
->nh_rif
;
1711 static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry
*fib_entry
)
1713 fib_entry
->offloaded
= true;
1715 switch (fib_entry
->fib_node
->vr
->proto
) {
1716 case MLXSW_SP_L3_PROTO_IPV4
:
1717 fib_info_offload_inc(fib_entry
->nh_group
->key
.fi
);
1719 case MLXSW_SP_L3_PROTO_IPV6
:
1725 mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry
*fib_entry
)
1727 switch (fib_entry
->fib_node
->vr
->proto
) {
1728 case MLXSW_SP_L3_PROTO_IPV4
:
1729 fib_info_offload_dec(fib_entry
->nh_group
->key
.fi
);
1731 case MLXSW_SP_L3_PROTO_IPV6
:
1735 fib_entry
->offloaded
= false;
1739 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry
*fib_entry
,
1740 enum mlxsw_reg_ralue_op op
, int err
)
1743 case MLXSW_REG_RALUE_OP_WRITE_DELETE
:
1744 if (!fib_entry
->offloaded
)
1746 return mlxsw_sp_fib_entry_offload_unset(fib_entry
);
1747 case MLXSW_REG_RALUE_OP_WRITE_WRITE
:
1750 if (mlxsw_sp_fib_entry_should_offload(fib_entry
) &&
1751 !fib_entry
->offloaded
)
1752 mlxsw_sp_fib_entry_offload_set(fib_entry
);
1753 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry
) &&
1754 fib_entry
->offloaded
)
1755 mlxsw_sp_fib_entry_offload_unset(fib_entry
);
1762 static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp
*mlxsw_sp
,
1763 struct mlxsw_sp_fib_entry
*fib_entry
,
1764 enum mlxsw_reg_ralue_op op
)
1766 char ralue_pl
[MLXSW_REG_RALUE_LEN
];
1767 u32
*p_dip
= (u32
*) fib_entry
->fib_node
->key
.addr
;
1768 struct mlxsw_sp_vr
*vr
= fib_entry
->fib_node
->vr
;
1769 enum mlxsw_reg_ralue_trap_action trap_action
;
1771 u32 adjacency_index
= 0;
1774 /* In case the nexthop group adjacency index is valid, use it
1775 * with provided ECMP size. Otherwise, setup trap and pass
1776 * traffic to kernel.
1778 if (mlxsw_sp_fib_entry_should_offload(fib_entry
)) {
1779 trap_action
= MLXSW_REG_RALUE_TRAP_ACTION_NOP
;
1780 adjacency_index
= fib_entry
->nh_group
->adj_index
;
1781 ecmp_size
= fib_entry
->nh_group
->ecmp_size
;
1783 trap_action
= MLXSW_REG_RALUE_TRAP_ACTION_TRAP
;
1784 trap_id
= MLXSW_TRAP_ID_RTR_INGRESS0
;
1787 mlxsw_reg_ralue_pack4(ralue_pl
,
1788 (enum mlxsw_reg_ralxx_protocol
) vr
->proto
, op
,
1789 vr
->id
, fib_entry
->fib_node
->key
.prefix_len
,
1791 mlxsw_reg_ralue_act_remote_pack(ralue_pl
, trap_action
, trap_id
,
1792 adjacency_index
, ecmp_size
);
1793 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralue
), ralue_pl
);
1796 static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp
*mlxsw_sp
,
1797 struct mlxsw_sp_fib_entry
*fib_entry
,
1798 enum mlxsw_reg_ralue_op op
)
1800 struct mlxsw_sp_rif
*r
= fib_entry
->nh_group
->nh_rif
;
1801 enum mlxsw_reg_ralue_trap_action trap_action
;
1802 char ralue_pl
[MLXSW_REG_RALUE_LEN
];
1803 u32
*p_dip
= (u32
*) fib_entry
->fib_node
->key
.addr
;
1804 struct mlxsw_sp_vr
*vr
= fib_entry
->fib_node
->vr
;
1808 if (mlxsw_sp_fib_entry_should_offload(fib_entry
)) {
1809 trap_action
= MLXSW_REG_RALUE_TRAP_ACTION_NOP
;
1812 trap_action
= MLXSW_REG_RALUE_TRAP_ACTION_TRAP
;
1813 trap_id
= MLXSW_TRAP_ID_RTR_INGRESS0
;
1816 mlxsw_reg_ralue_pack4(ralue_pl
,
1817 (enum mlxsw_reg_ralxx_protocol
) vr
->proto
, op
,
1818 vr
->id
, fib_entry
->fib_node
->key
.prefix_len
,
1820 mlxsw_reg_ralue_act_local_pack(ralue_pl
, trap_action
, trap_id
, rif
);
1821 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralue
), ralue_pl
);
1824 static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp
*mlxsw_sp
,
1825 struct mlxsw_sp_fib_entry
*fib_entry
,
1826 enum mlxsw_reg_ralue_op op
)
1828 char ralue_pl
[MLXSW_REG_RALUE_LEN
];
1829 u32
*p_dip
= (u32
*) fib_entry
->fib_node
->key
.addr
;
1830 struct mlxsw_sp_vr
*vr
= fib_entry
->fib_node
->vr
;
1832 mlxsw_reg_ralue_pack4(ralue_pl
,
1833 (enum mlxsw_reg_ralxx_protocol
) vr
->proto
, op
,
1834 vr
->id
, fib_entry
->fib_node
->key
.prefix_len
,
1836 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl
);
1837 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralue
), ralue_pl
);
1840 static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp
*mlxsw_sp
,
1841 struct mlxsw_sp_fib_entry
*fib_entry
,
1842 enum mlxsw_reg_ralue_op op
)
1844 switch (fib_entry
->type
) {
1845 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE
:
1846 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp
, fib_entry
, op
);
1847 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
:
1848 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp
, fib_entry
, op
);
1849 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP
:
1850 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp
, fib_entry
, op
);
1855 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp
*mlxsw_sp
,
1856 struct mlxsw_sp_fib_entry
*fib_entry
,
1857 enum mlxsw_reg_ralue_op op
)
1861 switch (fib_entry
->fib_node
->vr
->proto
) {
1862 case MLXSW_SP_L3_PROTO_IPV4
:
1863 err
= mlxsw_sp_fib_entry_op4(mlxsw_sp
, fib_entry
, op
);
1865 case MLXSW_SP_L3_PROTO_IPV6
:
1868 mlxsw_sp_fib_entry_offload_refresh(fib_entry
, op
, err
);
1872 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp
*mlxsw_sp
,
1873 struct mlxsw_sp_fib_entry
*fib_entry
)
1875 return mlxsw_sp_fib_entry_op(mlxsw_sp
, fib_entry
,
1876 MLXSW_REG_RALUE_OP_WRITE_WRITE
);
1879 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp
*mlxsw_sp
,
1880 struct mlxsw_sp_fib_entry
*fib_entry
)
1882 return mlxsw_sp_fib_entry_op(mlxsw_sp
, fib_entry
,
1883 MLXSW_REG_RALUE_OP_WRITE_DELETE
);
1887 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp
*mlxsw_sp
,
1888 const struct fib_entry_notifier_info
*fen_info
,
1889 struct mlxsw_sp_fib_entry
*fib_entry
)
1891 struct fib_info
*fi
= fen_info
->fi
;
1893 switch (fen_info
->type
) {
1894 case RTN_BROADCAST
: /* fall through */
1896 fib_entry
->type
= MLXSW_SP_FIB_ENTRY_TYPE_TRAP
;
1898 case RTN_UNREACHABLE
: /* fall through */
1899 case RTN_BLACKHOLE
: /* fall through */
1901 /* Packets hitting these routes need to be trapped, but
1902 * can do so with a lower priority than packets directed
1903 * at the host, so use action type local instead of trap.
1905 fib_entry
->type
= MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
;
1908 if (fi
->fib_nh
->nh_scope
!= RT_SCOPE_LINK
)
1909 fib_entry
->type
= MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
;
1911 fib_entry
->type
= MLXSW_SP_FIB_ENTRY_TYPE_REMOTE
;
1918 static struct mlxsw_sp_fib_entry
*
1919 mlxsw_sp_fib4_entry_create(struct mlxsw_sp
*mlxsw_sp
,
1920 struct mlxsw_sp_fib_node
*fib_node
,
1921 const struct fib_entry_notifier_info
*fen_info
)
1923 struct mlxsw_sp_fib_entry
*fib_entry
;
1926 fib_entry
= kzalloc(sizeof(*fib_entry
), GFP_KERNEL
);
1929 goto err_fib_entry_alloc
;
1932 err
= mlxsw_sp_fib4_entry_type_set(mlxsw_sp
, fen_info
, fib_entry
);
1934 goto err_fib4_entry_type_set
;
1936 err
= mlxsw_sp_nexthop_group_get(mlxsw_sp
, fib_entry
, fen_info
->fi
);
1938 goto err_nexthop_group_get
;
1940 fib_entry
->params
.prio
= fen_info
->fi
->fib_priority
;
1941 fib_entry
->params
.tb_id
= fen_info
->tb_id
;
1942 fib_entry
->params
.type
= fen_info
->type
;
1943 fib_entry
->params
.tos
= fen_info
->tos
;
1945 fib_entry
->fib_node
= fib_node
;
1949 err_nexthop_group_get
:
1950 err_fib4_entry_type_set
:
1952 err_fib_entry_alloc
:
1953 return ERR_PTR(err
);
1956 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp
*mlxsw_sp
,
1957 struct mlxsw_sp_fib_entry
*fib_entry
)
1959 mlxsw_sp_nexthop_group_put(mlxsw_sp
, fib_entry
);
1963 static struct mlxsw_sp_fib_node
*
1964 mlxsw_sp_fib4_node_get(struct mlxsw_sp
*mlxsw_sp
,
1965 const struct fib_entry_notifier_info
*fen_info
);
1967 static struct mlxsw_sp_fib_entry
*
1968 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp
*mlxsw_sp
,
1969 const struct fib_entry_notifier_info
*fen_info
)
1971 struct mlxsw_sp_fib_entry
*fib_entry
;
1972 struct mlxsw_sp_fib_node
*fib_node
;
1974 fib_node
= mlxsw_sp_fib4_node_get(mlxsw_sp
, fen_info
);
1975 if (IS_ERR(fib_node
))
1978 list_for_each_entry(fib_entry
, &fib_node
->entry_list
, list
) {
1979 if (fib_entry
->params
.tb_id
== fen_info
->tb_id
&&
1980 fib_entry
->params
.tos
== fen_info
->tos
&&
1981 fib_entry
->params
.type
== fen_info
->type
&&
1982 fib_entry
->nh_group
->key
.fi
== fen_info
->fi
) {
1990 static const struct rhashtable_params mlxsw_sp_fib_ht_params
= {
1991 .key_offset
= offsetof(struct mlxsw_sp_fib_node
, key
),
1992 .head_offset
= offsetof(struct mlxsw_sp_fib_node
, ht_node
),
1993 .key_len
= sizeof(struct mlxsw_sp_fib_key
),
1994 .automatic_shrinking
= true,
1997 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib
*fib
,
1998 struct mlxsw_sp_fib_node
*fib_node
)
2000 return rhashtable_insert_fast(&fib
->ht
, &fib_node
->ht_node
,
2001 mlxsw_sp_fib_ht_params
);
2004 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib
*fib
,
2005 struct mlxsw_sp_fib_node
*fib_node
)
2007 rhashtable_remove_fast(&fib
->ht
, &fib_node
->ht_node
,
2008 mlxsw_sp_fib_ht_params
);
2011 static struct mlxsw_sp_fib_node
*
2012 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib
*fib
, const void *addr
,
2013 size_t addr_len
, unsigned char prefix_len
)
2015 struct mlxsw_sp_fib_key key
;
2017 memset(&key
, 0, sizeof(key
));
2018 memcpy(key
.addr
, addr
, addr_len
);
2019 key
.prefix_len
= prefix_len
;
2020 return rhashtable_lookup_fast(&fib
->ht
, &key
, mlxsw_sp_fib_ht_params
);
2023 static struct mlxsw_sp_fib_node
*
2024 mlxsw_sp_fib_node_create(struct mlxsw_sp_vr
*vr
, const void *addr
,
2025 size_t addr_len
, unsigned char prefix_len
)
2027 struct mlxsw_sp_fib_node
*fib_node
;
2029 fib_node
= kzalloc(sizeof(*fib_node
), GFP_KERNEL
);
2033 INIT_LIST_HEAD(&fib_node
->entry_list
);
2034 list_add(&fib_node
->list
, &vr
->fib
->node_list
);
2035 memcpy(fib_node
->key
.addr
, addr
, addr_len
);
2036 fib_node
->key
.prefix_len
= prefix_len
;
2037 mlxsw_sp_fib_node_insert(vr
->fib
, fib_node
);
2043 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node
*fib_node
)
2045 mlxsw_sp_fib_node_remove(fib_node
->vr
->fib
, fib_node
);
2046 list_del(&fib_node
->list
);
2047 WARN_ON(!list_empty(&fib_node
->entry_list
));
2052 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node
*fib_node
,
2053 const struct mlxsw_sp_fib_entry
*fib_entry
)
2055 return list_first_entry(&fib_node
->entry_list
,
2056 struct mlxsw_sp_fib_entry
, list
) == fib_entry
;
2059 static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node
*fib_node
)
2061 unsigned char prefix_len
= fib_node
->key
.prefix_len
;
2062 struct mlxsw_sp_fib
*fib
= fib_node
->vr
->fib
;
2064 if (fib
->prefix_ref_count
[prefix_len
]++ == 0)
2065 mlxsw_sp_prefix_usage_set(&fib
->prefix_usage
, prefix_len
);
2068 static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node
*fib_node
)
2070 unsigned char prefix_len
= fib_node
->key
.prefix_len
;
2071 struct mlxsw_sp_fib
*fib
= fib_node
->vr
->fib
;
2073 if (--fib
->prefix_ref_count
[prefix_len
] == 0)
2074 mlxsw_sp_prefix_usage_clear(&fib
->prefix_usage
, prefix_len
);
2077 static struct mlxsw_sp_fib_node
*
2078 mlxsw_sp_fib4_node_get(struct mlxsw_sp
*mlxsw_sp
,
2079 const struct fib_entry_notifier_info
*fen_info
)
2081 struct mlxsw_sp_fib_node
*fib_node
;
2082 struct mlxsw_sp_vr
*vr
;
2085 vr
= mlxsw_sp_vr_get(mlxsw_sp
, fen_info
->dst_len
, fen_info
->tb_id
,
2086 MLXSW_SP_L3_PROTO_IPV4
);
2088 return ERR_CAST(vr
);
2090 fib_node
= mlxsw_sp_fib_node_lookup(vr
->fib
, &fen_info
->dst
,
2091 sizeof(fen_info
->dst
),
2096 fib_node
= mlxsw_sp_fib_node_create(vr
, &fen_info
->dst
,
2097 sizeof(fen_info
->dst
),
2101 goto err_fib_node_create
;
2106 err_fib_node_create
:
2107 mlxsw_sp_vr_put(mlxsw_sp
, vr
);
2108 return ERR_PTR(err
);
2111 static void mlxsw_sp_fib4_node_put(struct mlxsw_sp
*mlxsw_sp
,
2112 struct mlxsw_sp_fib_node
*fib_node
)
2114 struct mlxsw_sp_vr
*vr
= fib_node
->vr
;
2116 if (!list_empty(&fib_node
->entry_list
))
2118 mlxsw_sp_fib_node_destroy(fib_node
);
2119 mlxsw_sp_vr_put(mlxsw_sp
, vr
);
2122 static struct mlxsw_sp_fib_entry
*
2123 mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node
*fib_node
,
2124 const struct mlxsw_sp_fib_entry_params
*params
)
2126 struct mlxsw_sp_fib_entry
*fib_entry
;
2128 list_for_each_entry(fib_entry
, &fib_node
->entry_list
, list
) {
2129 if (fib_entry
->params
.tb_id
> params
->tb_id
)
2131 if (fib_entry
->params
.tb_id
!= params
->tb_id
)
2133 if (fib_entry
->params
.tos
> params
->tos
)
2135 if (fib_entry
->params
.prio
>= params
->prio
||
2136 fib_entry
->params
.tos
< params
->tos
)
2143 static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry
*fib_entry
,
2144 struct mlxsw_sp_fib_entry
*new_entry
)
2146 struct mlxsw_sp_fib_node
*fib_node
;
2148 if (WARN_ON(!fib_entry
))
2151 fib_node
= fib_entry
->fib_node
;
2152 list_for_each_entry_from(fib_entry
, &fib_node
->entry_list
, list
) {
2153 if (fib_entry
->params
.tb_id
!= new_entry
->params
.tb_id
||
2154 fib_entry
->params
.tos
!= new_entry
->params
.tos
||
2155 fib_entry
->params
.prio
!= new_entry
->params
.prio
)
2159 list_add_tail(&new_entry
->list
, &fib_entry
->list
);
2164 mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node
*fib_node
,
2165 struct mlxsw_sp_fib_entry
*new_entry
,
2166 bool replace
, bool append
)
2168 struct mlxsw_sp_fib_entry
*fib_entry
;
2170 fib_entry
= mlxsw_sp_fib4_node_entry_find(fib_node
, &new_entry
->params
);
2173 return mlxsw_sp_fib4_node_list_append(fib_entry
, new_entry
);
2174 if (replace
&& WARN_ON(!fib_entry
))
2177 /* Insert new entry before replaced one, so that we can later
2178 * remove the second.
2181 list_add_tail(&new_entry
->list
, &fib_entry
->list
);
2183 struct mlxsw_sp_fib_entry
*last
;
2185 list_for_each_entry(last
, &fib_node
->entry_list
, list
) {
2186 if (new_entry
->params
.tb_id
> last
->params
.tb_id
)
2192 list_add(&new_entry
->list
, &fib_entry
->list
);
2194 list_add(&new_entry
->list
, &fib_node
->entry_list
);
2201 mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry
*fib_entry
)
2203 list_del(&fib_entry
->list
);
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}

static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
					     append);
	if (err)
		return err;

	err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
	if (err)
		goto err_fib4_node_entry_add;

	mlxsw_sp_fib_node_prefix_inc(fib_node);

	return 0;

err_fib4_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib_entry);
	return err;
}

static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib_node_prefix_dec(fib_node);
	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}

static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	struct mlxsw_sp_fib_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib_entry, list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}

static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router.aborted)
		return;

	fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib_entry))
		return;
	fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}

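/* When the driver can no longer keep the hardware tables in sync with the
 * kernel FIB it stops offloading: the tables are flushed and a default
 * catch-all route that traps packets to the CPU is installed, so routing
 * continues in software.
 */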
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	char raltb_pl[MLXSW_REG_RALTB_LEN];
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	int err;

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
	if (err)
		return err;

	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->vr->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fib_node *fib_node, *tmp;
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];

		if (!vr->used)
			continue;

		list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
					 list) {
			bool do_break = &tmp->list == &vr->fib->node_list;

			mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
			if (do_break)
				break;
		}
	}
}

static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router.aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}

struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		struct fib_entry_notifier_info fen_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
};

static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	fib_work->mlxsw_sp = mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
			return mlxsw_sp->rifs[i];

	return NULL;
}

static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *r)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
}

static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
				       const struct in_device *in_dev,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		if (!r)
			return true;
		return false;
	case NETDEV_DOWN:
		if (r && !in_dev->ifa_list)
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or a LAG slave.
		 */
		return false;
	}

	return false;
}

#define MLXSW_SP_INVALID_RIF 0xffff
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (!mlxsw_sp->rifs[i])
			return i;

	return MLXSW_SP_INVALID_RIF;
}

static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
					   bool *p_lagged, u16 *p_system_port)
{
	u8 local_port = mlxsw_sp_vport->local_port;

	*p_lagged = mlxsw_sp_vport->lagged;
	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}

static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *l3_dev, u16 rif,
				    bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
			    l3_dev->mtu, l3_dev->dev_addr);

	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

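/* Router interfaces created on top of port netdevs (and their uppers) use
 * rFIDs, which are derived 1:1 from the RIF index.
 */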
static u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
	return MLXSW_SP_RFID_BASE + rif;
}

static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->leave = mlxsw_sp_vport_rif_sp_leave;
	f->dev = l3_dev;
	f->fid = fid;

	return f;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	INIT_LIST_HEAD(&r->nexthop_list);
	INIT_LIST_HEAD(&r->neigh_list);
	ether_addr_copy(r->addr, l3_dev->dev_addr);
	r->mtu = l3_dev->mtu;
	r->dev = l3_dev;
	r->rif = rif;
	r->f = f;

	return r;
}

static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}

static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}

static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}

static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}

static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
{
	return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
	       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
}

static u16 mlxsw_sp_flood_table_index_get(u16 fid)
{
	return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
}

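/* When a bridge FID gains a router interface, the router port is added to
 * (or removed from) the FID's broadcast flood table, so that flooded
 * packets also reach the router.
 */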
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
					  bool set)
{
	enum mlxsw_flood_table_type table_type;
	char *sftr_pl;
	u16 index;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	table_type = mlxsw_sp_flood_table_type_get(fid);
	index = mlxsw_sp_flood_table_index_get(fid);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
			    1, MLXSW_PORT_ROUTER_PORT, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}

static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return MLXSW_REG_RITR_FID_IF;
	else
		return MLXSW_REG_RITR_VLAN_IF;
}

static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return -ERANGE;

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
	return err;
}

void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}

static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}

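/* RIF configuration is driven by IPv4 address notifications: a RIF is torn
 * down when the last address is removed from its netdev, and the handler
 * dispatches on the netdev type (physical port, LAG, bridge or VLAN device).
 */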
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, ifa->ifa_dev, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
				 GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	int i;

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);

	kfree(mlxsw_sp->rifs);
}

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	mlxsw_sp_lpm_init(mlxsw_sp);
	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
err_nexthop_ht_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
	__mlxsw_sp_router_fini(mlxsw_sp);
}