2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/rhashtable.h>
40 #include <linux/bitops.h>
41 #include <linux/in6.h>
42 #include <linux/notifier.h>
43 #include <linux/inetdevice.h>
44 #include <linux/netdevice.h>
45 #include <linux/if_bridge.h>
46 #include <net/netevent.h>
47 #include <net/neighbour.h>
49 #include <net/ip_fib.h>
50 #include <net/fib_rules.h>
51 #include <net/l3mdev.h>
56 #include "spectrum_cnt.h"
57 #include "spectrum_dpipe.h"
58 #include "spectrum_router.h"
61 struct mlxsw_sp_lpm_tree
;
62 struct mlxsw_sp_rif_ops
;
64 struct mlxsw_sp_router
{
65 struct mlxsw_sp
*mlxsw_sp
;
66 struct mlxsw_sp_rif
**rifs
;
67 struct mlxsw_sp_vr
*vrs
;
68 struct rhashtable neigh_ht
;
69 struct rhashtable nexthop_group_ht
;
70 struct rhashtable nexthop_ht
;
72 struct mlxsw_sp_lpm_tree
*trees
;
73 unsigned int tree_count
;
76 struct delayed_work dw
;
77 unsigned long interval
; /* ms */
79 struct delayed_work nexthop_probe_dw
;
80 #define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
81 struct list_head nexthop_neighs_list
;
83 struct notifier_block fib_nb
;
84 const struct mlxsw_sp_rif_ops
**rif_ops_arr
;
88 struct list_head nexthop_list
;
89 struct list_head neigh_list
;
90 struct net_device
*dev
;
91 struct mlxsw_sp_fid
*fid
;
92 unsigned char addr
[ETH_ALEN
];
96 const struct mlxsw_sp_rif_ops
*ops
;
97 struct mlxsw_sp
*mlxsw_sp
;
99 unsigned int counter_ingress
;
100 bool counter_ingress_valid
;
101 unsigned int counter_egress
;
102 bool counter_egress_valid
;
105 struct mlxsw_sp_rif_params
{
106 struct net_device
*dev
;
115 struct mlxsw_sp_rif_subport
{
116 struct mlxsw_sp_rif common
;
125 struct mlxsw_sp_rif_ops
{
126 enum mlxsw_sp_rif_type type
;
129 void (*setup
)(struct mlxsw_sp_rif
*rif
,
130 const struct mlxsw_sp_rif_params
*params
);
131 int (*configure
)(struct mlxsw_sp_rif
*rif
);
132 void (*deconfigure
)(struct mlxsw_sp_rif
*rif
);
133 struct mlxsw_sp_fid
* (*fid_get
)(struct mlxsw_sp_rif
*rif
);
136 static unsigned int *
137 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif
*rif
,
138 enum mlxsw_sp_rif_counter_dir dir
)
141 case MLXSW_SP_RIF_COUNTER_EGRESS
:
142 return &rif
->counter_egress
;
143 case MLXSW_SP_RIF_COUNTER_INGRESS
:
144 return &rif
->counter_ingress
;
150 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif
*rif
,
151 enum mlxsw_sp_rif_counter_dir dir
)
154 case MLXSW_SP_RIF_COUNTER_EGRESS
:
155 return rif
->counter_egress_valid
;
156 case MLXSW_SP_RIF_COUNTER_INGRESS
:
157 return rif
->counter_ingress_valid
;
163 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif
*rif
,
164 enum mlxsw_sp_rif_counter_dir dir
,
168 case MLXSW_SP_RIF_COUNTER_EGRESS
:
169 rif
->counter_egress_valid
= valid
;
171 case MLXSW_SP_RIF_COUNTER_INGRESS
:
172 rif
->counter_ingress_valid
= valid
;
177 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp
*mlxsw_sp
, u16 rif_index
,
178 unsigned int counter_index
, bool enable
,
179 enum mlxsw_sp_rif_counter_dir dir
)
181 char ritr_pl
[MLXSW_REG_RITR_LEN
];
182 bool is_egress
= false;
185 if (dir
== MLXSW_SP_RIF_COUNTER_EGRESS
)
187 mlxsw_reg_ritr_rif_pack(ritr_pl
, rif_index
);
188 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ritr
), ritr_pl
);
192 mlxsw_reg_ritr_counter_pack(ritr_pl
, counter_index
, enable
,
194 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ritr
), ritr_pl
);
197 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp
*mlxsw_sp
,
198 struct mlxsw_sp_rif
*rif
,
199 enum mlxsw_sp_rif_counter_dir dir
, u64
*cnt
)
201 char ricnt_pl
[MLXSW_REG_RICNT_LEN
];
202 unsigned int *p_counter_index
;
206 valid
= mlxsw_sp_rif_counter_valid_get(rif
, dir
);
210 p_counter_index
= mlxsw_sp_rif_p_counter_get(rif
, dir
);
211 if (!p_counter_index
)
213 mlxsw_reg_ricnt_pack(ricnt_pl
, *p_counter_index
,
214 MLXSW_REG_RICNT_OPCODE_NOP
);
215 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ricnt
), ricnt_pl
);
218 *cnt
= mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl
);
222 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp
*mlxsw_sp
,
223 unsigned int counter_index
)
225 char ricnt_pl
[MLXSW_REG_RICNT_LEN
];
227 mlxsw_reg_ricnt_pack(ricnt_pl
, counter_index
,
228 MLXSW_REG_RICNT_OPCODE_CLEAR
);
229 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ricnt
), ricnt_pl
);
232 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp
*mlxsw_sp
,
233 struct mlxsw_sp_rif
*rif
,
234 enum mlxsw_sp_rif_counter_dir dir
)
236 unsigned int *p_counter_index
;
239 p_counter_index
= mlxsw_sp_rif_p_counter_get(rif
, dir
);
240 if (!p_counter_index
)
242 err
= mlxsw_sp_counter_alloc(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_RIF
,
247 err
= mlxsw_sp_rif_counter_clear(mlxsw_sp
, *p_counter_index
);
249 goto err_counter_clear
;
251 err
= mlxsw_sp_rif_counter_edit(mlxsw_sp
, rif
->rif_index
,
252 *p_counter_index
, true, dir
);
254 goto err_counter_edit
;
255 mlxsw_sp_rif_counter_valid_set(rif
, dir
, true);
260 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_RIF
,
265 void mlxsw_sp_rif_counter_free(struct mlxsw_sp
*mlxsw_sp
,
266 struct mlxsw_sp_rif
*rif
,
267 enum mlxsw_sp_rif_counter_dir dir
)
269 unsigned int *p_counter_index
;
271 if (!mlxsw_sp_rif_counter_valid_get(rif
, dir
))
274 p_counter_index
= mlxsw_sp_rif_p_counter_get(rif
, dir
);
275 if (WARN_ON(!p_counter_index
))
277 mlxsw_sp_rif_counter_edit(mlxsw_sp
, rif
->rif_index
,
278 *p_counter_index
, false, dir
);
279 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_RIF
,
281 mlxsw_sp_rif_counter_valid_set(rif
, dir
, false);
284 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif
*rif
)
286 struct mlxsw_sp
*mlxsw_sp
= rif
->mlxsw_sp
;
287 struct devlink
*devlink
;
289 devlink
= priv_to_devlink(mlxsw_sp
->core
);
290 if (!devlink_dpipe_table_counter_enabled(devlink
,
291 MLXSW_SP_DPIPE_TABLE_NAME_ERIF
))
293 mlxsw_sp_rif_counter_alloc(mlxsw_sp
, rif
, MLXSW_SP_RIF_COUNTER_EGRESS
);
296 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif
*rif
)
298 struct mlxsw_sp
*mlxsw_sp
= rif
->mlxsw_sp
;
300 mlxsw_sp_rif_counter_free(mlxsw_sp
, rif
, MLXSW_SP_RIF_COUNTER_EGRESS
);
303 static struct mlxsw_sp_rif
*
304 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp
*mlxsw_sp
,
305 const struct net_device
*dev
);
307 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
309 struct mlxsw_sp_prefix_usage
{
310 DECLARE_BITMAP(b
, MLXSW_SP_PREFIX_COUNT
);
313 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
314 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
317 mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage
*prefix_usage1
,
318 struct mlxsw_sp_prefix_usage
*prefix_usage2
)
320 unsigned char prefix
;
322 mlxsw_sp_prefix_usage_for_each(prefix
, prefix_usage1
) {
323 if (!test_bit(prefix
, prefix_usage2
->b
))
330 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage
*prefix_usage1
,
331 struct mlxsw_sp_prefix_usage
*prefix_usage2
)
333 return !memcmp(prefix_usage1
, prefix_usage2
, sizeof(*prefix_usage1
));
337 mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage
*prefix_usage
)
339 struct mlxsw_sp_prefix_usage prefix_usage_none
= {{ 0 } };
341 return mlxsw_sp_prefix_usage_eq(prefix_usage
, &prefix_usage_none
);
345 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage
*prefix_usage1
,
346 struct mlxsw_sp_prefix_usage
*prefix_usage2
)
348 memcpy(prefix_usage1
, prefix_usage2
, sizeof(*prefix_usage1
));
352 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage
*prefix_usage
,
353 unsigned char prefix_len
)
355 set_bit(prefix_len
, prefix_usage
->b
);
359 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage
*prefix_usage
,
360 unsigned char prefix_len
)
362 clear_bit(prefix_len
, prefix_usage
->b
);
365 struct mlxsw_sp_fib_key
{
366 unsigned char addr
[sizeof(struct in6_addr
)];
367 unsigned char prefix_len
;
370 enum mlxsw_sp_fib_entry_type
{
371 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE
,
372 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
,
373 MLXSW_SP_FIB_ENTRY_TYPE_TRAP
,
376 struct mlxsw_sp_nexthop_group
;
379 struct mlxsw_sp_fib_node
{
380 struct list_head entry_list
;
381 struct list_head list
;
382 struct rhash_head ht_node
;
383 struct mlxsw_sp_fib
*fib
;
384 struct mlxsw_sp_fib_key key
;
387 struct mlxsw_sp_fib_entry_params
{
394 struct mlxsw_sp_fib_entry
{
395 struct list_head list
;
396 struct mlxsw_sp_fib_node
*fib_node
;
397 enum mlxsw_sp_fib_entry_type type
;
398 struct list_head nexthop_group_node
;
399 struct mlxsw_sp_nexthop_group
*nh_group
;
400 struct mlxsw_sp_fib_entry_params params
;
404 enum mlxsw_sp_l3proto
{
405 MLXSW_SP_L3_PROTO_IPV4
,
406 MLXSW_SP_L3_PROTO_IPV6
,
409 struct mlxsw_sp_lpm_tree
{
411 unsigned int ref_count
;
412 enum mlxsw_sp_l3proto proto
;
413 struct mlxsw_sp_prefix_usage prefix_usage
;
416 struct mlxsw_sp_fib
{
417 struct rhashtable ht
;
418 struct list_head node_list
;
419 struct mlxsw_sp_vr
*vr
;
420 struct mlxsw_sp_lpm_tree
*lpm_tree
;
421 unsigned long prefix_ref_count
[MLXSW_SP_PREFIX_COUNT
];
422 struct mlxsw_sp_prefix_usage prefix_usage
;
423 enum mlxsw_sp_l3proto proto
;
427 u16 id
; /* virtual router ID */
428 u32 tb_id
; /* kernel fib table id */
429 unsigned int rif_count
;
430 struct mlxsw_sp_fib
*fib4
;
433 static const struct rhashtable_params mlxsw_sp_fib_ht_params
;
435 static struct mlxsw_sp_fib
*mlxsw_sp_fib_create(struct mlxsw_sp_vr
*vr
,
436 enum mlxsw_sp_l3proto proto
)
438 struct mlxsw_sp_fib
*fib
;
441 fib
= kzalloc(sizeof(*fib
), GFP_KERNEL
);
443 return ERR_PTR(-ENOMEM
);
444 err
= rhashtable_init(&fib
->ht
, &mlxsw_sp_fib_ht_params
);
446 goto err_rhashtable_init
;
447 INIT_LIST_HEAD(&fib
->node_list
);
457 static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib
*fib
)
459 WARN_ON(!list_empty(&fib
->node_list
));
460 WARN_ON(fib
->lpm_tree
);
461 rhashtable_destroy(&fib
->ht
);
465 static struct mlxsw_sp_lpm_tree
*
466 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp
*mlxsw_sp
)
468 static struct mlxsw_sp_lpm_tree
*lpm_tree
;
471 for (i
= 0; i
< mlxsw_sp
->router
->lpm
.tree_count
; i
++) {
472 lpm_tree
= &mlxsw_sp
->router
->lpm
.trees
[i
];
473 if (lpm_tree
->ref_count
== 0)
479 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp
*mlxsw_sp
,
480 struct mlxsw_sp_lpm_tree
*lpm_tree
)
482 char ralta_pl
[MLXSW_REG_RALTA_LEN
];
484 mlxsw_reg_ralta_pack(ralta_pl
, true,
485 (enum mlxsw_reg_ralxx_protocol
) lpm_tree
->proto
,
487 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralta
), ralta_pl
);
490 static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp
*mlxsw_sp
,
491 struct mlxsw_sp_lpm_tree
*lpm_tree
)
493 char ralta_pl
[MLXSW_REG_RALTA_LEN
];
495 mlxsw_reg_ralta_pack(ralta_pl
, false,
496 (enum mlxsw_reg_ralxx_protocol
) lpm_tree
->proto
,
498 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralta
), ralta_pl
);
502 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp
*mlxsw_sp
,
503 struct mlxsw_sp_prefix_usage
*prefix_usage
,
504 struct mlxsw_sp_lpm_tree
*lpm_tree
)
506 char ralst_pl
[MLXSW_REG_RALST_LEN
];
509 u8 last_prefix
= MLXSW_REG_RALST_BIN_NO_CHILD
;
511 mlxsw_sp_prefix_usage_for_each(prefix
, prefix_usage
)
514 mlxsw_reg_ralst_pack(ralst_pl
, root_bin
, lpm_tree
->id
);
515 mlxsw_sp_prefix_usage_for_each(prefix
, prefix_usage
) {
518 mlxsw_reg_ralst_bin_pack(ralst_pl
, prefix
, last_prefix
,
519 MLXSW_REG_RALST_BIN_NO_CHILD
);
520 last_prefix
= prefix
;
522 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralst
), ralst_pl
);
525 static struct mlxsw_sp_lpm_tree
*
526 mlxsw_sp_lpm_tree_create(struct mlxsw_sp
*mlxsw_sp
,
527 struct mlxsw_sp_prefix_usage
*prefix_usage
,
528 enum mlxsw_sp_l3proto proto
)
530 struct mlxsw_sp_lpm_tree
*lpm_tree
;
533 lpm_tree
= mlxsw_sp_lpm_tree_find_unused(mlxsw_sp
);
535 return ERR_PTR(-EBUSY
);
536 lpm_tree
->proto
= proto
;
537 err
= mlxsw_sp_lpm_tree_alloc(mlxsw_sp
, lpm_tree
);
541 err
= mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp
, prefix_usage
,
544 goto err_left_struct_set
;
545 memcpy(&lpm_tree
->prefix_usage
, prefix_usage
,
546 sizeof(lpm_tree
->prefix_usage
));
550 mlxsw_sp_lpm_tree_free(mlxsw_sp
, lpm_tree
);
554 static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp
*mlxsw_sp
,
555 struct mlxsw_sp_lpm_tree
*lpm_tree
)
557 return mlxsw_sp_lpm_tree_free(mlxsw_sp
, lpm_tree
);
560 static struct mlxsw_sp_lpm_tree
*
561 mlxsw_sp_lpm_tree_get(struct mlxsw_sp
*mlxsw_sp
,
562 struct mlxsw_sp_prefix_usage
*prefix_usage
,
563 enum mlxsw_sp_l3proto proto
)
565 struct mlxsw_sp_lpm_tree
*lpm_tree
;
568 for (i
= 0; i
< mlxsw_sp
->router
->lpm
.tree_count
; i
++) {
569 lpm_tree
= &mlxsw_sp
->router
->lpm
.trees
[i
];
570 if (lpm_tree
->ref_count
!= 0 &&
571 lpm_tree
->proto
== proto
&&
572 mlxsw_sp_prefix_usage_eq(&lpm_tree
->prefix_usage
,
576 lpm_tree
= mlxsw_sp_lpm_tree_create(mlxsw_sp
, prefix_usage
,
578 if (IS_ERR(lpm_tree
))
582 lpm_tree
->ref_count
++;
586 static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp
*mlxsw_sp
,
587 struct mlxsw_sp_lpm_tree
*lpm_tree
)
589 if (--lpm_tree
->ref_count
== 0)
590 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp
, lpm_tree
);
594 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
596 static int mlxsw_sp_lpm_init(struct mlxsw_sp
*mlxsw_sp
)
598 struct mlxsw_sp_lpm_tree
*lpm_tree
;
602 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LPM_TREES
))
605 max_trees
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LPM_TREES
);
606 mlxsw_sp
->router
->lpm
.tree_count
= max_trees
- MLXSW_SP_LPM_TREE_MIN
;
607 mlxsw_sp
->router
->lpm
.trees
= kcalloc(mlxsw_sp
->router
->lpm
.tree_count
,
608 sizeof(struct mlxsw_sp_lpm_tree
),
610 if (!mlxsw_sp
->router
->lpm
.trees
)
613 for (i
= 0; i
< mlxsw_sp
->router
->lpm
.tree_count
; i
++) {
614 lpm_tree
= &mlxsw_sp
->router
->lpm
.trees
[i
];
615 lpm_tree
->id
= i
+ MLXSW_SP_LPM_TREE_MIN
;
621 static void mlxsw_sp_lpm_fini(struct mlxsw_sp
*mlxsw_sp
)
623 kfree(mlxsw_sp
->router
->lpm
.trees
);
626 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr
*vr
)
631 static struct mlxsw_sp_vr
*mlxsw_sp_vr_find_unused(struct mlxsw_sp
*mlxsw_sp
)
633 struct mlxsw_sp_vr
*vr
;
636 for (i
= 0; i
< MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_VRS
); i
++) {
637 vr
= &mlxsw_sp
->router
->vrs
[i
];
638 if (!mlxsw_sp_vr_is_used(vr
))
644 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp
*mlxsw_sp
,
645 const struct mlxsw_sp_fib
*fib
)
647 char raltb_pl
[MLXSW_REG_RALTB_LEN
];
649 mlxsw_reg_raltb_pack(raltb_pl
, fib
->vr
->id
,
650 (enum mlxsw_reg_ralxx_protocol
) fib
->proto
,
652 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(raltb
), raltb_pl
);
655 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp
*mlxsw_sp
,
656 const struct mlxsw_sp_fib
*fib
)
658 char raltb_pl
[MLXSW_REG_RALTB_LEN
];
660 /* Bind to tree 0 which is default */
661 mlxsw_reg_raltb_pack(raltb_pl
, fib
->vr
->id
,
662 (enum mlxsw_reg_ralxx_protocol
) fib
->proto
, 0);
663 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(raltb
), raltb_pl
);
666 static u32
mlxsw_sp_fix_tb_id(u32 tb_id
)
668 /* For our purpose, squash main and local table into one */
669 if (tb_id
== RT_TABLE_LOCAL
)
670 tb_id
= RT_TABLE_MAIN
;
674 static struct mlxsw_sp_vr
*mlxsw_sp_vr_find(struct mlxsw_sp
*mlxsw_sp
,
677 struct mlxsw_sp_vr
*vr
;
680 tb_id
= mlxsw_sp_fix_tb_id(tb_id
);
682 for (i
= 0; i
< MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_VRS
); i
++) {
683 vr
= &mlxsw_sp
->router
->vrs
[i
];
684 if (mlxsw_sp_vr_is_used(vr
) && vr
->tb_id
== tb_id
)
690 static struct mlxsw_sp_fib
*mlxsw_sp_vr_fib(const struct mlxsw_sp_vr
*vr
,
691 enum mlxsw_sp_l3proto proto
)
694 case MLXSW_SP_L3_PROTO_IPV4
:
696 case MLXSW_SP_L3_PROTO_IPV6
:
702 static struct mlxsw_sp_vr
*mlxsw_sp_vr_create(struct mlxsw_sp
*mlxsw_sp
,
705 struct mlxsw_sp_vr
*vr
;
707 vr
= mlxsw_sp_vr_find_unused(mlxsw_sp
);
709 return ERR_PTR(-EBUSY
);
710 vr
->fib4
= mlxsw_sp_fib_create(vr
, MLXSW_SP_L3_PROTO_IPV4
);
711 if (IS_ERR(vr
->fib4
))
712 return ERR_CAST(vr
->fib4
);
717 static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr
*vr
)
719 mlxsw_sp_fib_destroy(vr
->fib4
);
724 mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp
*mlxsw_sp
, struct mlxsw_sp_fib
*fib
,
725 struct mlxsw_sp_prefix_usage
*req_prefix_usage
)
727 struct mlxsw_sp_lpm_tree
*lpm_tree
= fib
->lpm_tree
;
728 struct mlxsw_sp_lpm_tree
*new_tree
;
731 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage
, &lpm_tree
->prefix_usage
))
734 new_tree
= mlxsw_sp_lpm_tree_get(mlxsw_sp
, req_prefix_usage
,
736 if (IS_ERR(new_tree
)) {
737 /* We failed to get a tree according to the required
738 * prefix usage. However, the current tree might be still good
739 * for us if our requirement is subset of the prefixes used
742 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage
,
743 &lpm_tree
->prefix_usage
))
745 return PTR_ERR(new_tree
);
748 /* Prevent packet loss by overwriting existing binding */
749 fib
->lpm_tree
= new_tree
;
750 err
= mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp
, fib
);
753 mlxsw_sp_lpm_tree_put(mlxsw_sp
, lpm_tree
);
758 fib
->lpm_tree
= lpm_tree
;
759 mlxsw_sp_lpm_tree_put(mlxsw_sp
, new_tree
);
763 static struct mlxsw_sp_vr
*mlxsw_sp_vr_get(struct mlxsw_sp
*mlxsw_sp
, u32 tb_id
)
765 struct mlxsw_sp_vr
*vr
;
767 tb_id
= mlxsw_sp_fix_tb_id(tb_id
);
768 vr
= mlxsw_sp_vr_find(mlxsw_sp
, tb_id
);
770 vr
= mlxsw_sp_vr_create(mlxsw_sp
, tb_id
);
774 static void mlxsw_sp_vr_put(struct mlxsw_sp_vr
*vr
)
776 if (!vr
->rif_count
&& list_empty(&vr
->fib4
->node_list
))
777 mlxsw_sp_vr_destroy(vr
);
780 static int mlxsw_sp_vrs_init(struct mlxsw_sp
*mlxsw_sp
)
782 struct mlxsw_sp_vr
*vr
;
786 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_VRS
))
789 max_vrs
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_VRS
);
790 mlxsw_sp
->router
->vrs
= kcalloc(max_vrs
, sizeof(struct mlxsw_sp_vr
),
792 if (!mlxsw_sp
->router
->vrs
)
795 for (i
= 0; i
< max_vrs
; i
++) {
796 vr
= &mlxsw_sp
->router
->vrs
[i
];
803 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp
*mlxsw_sp
);
805 static void mlxsw_sp_vrs_fini(struct mlxsw_sp
*mlxsw_sp
)
807 /* At this stage we're guaranteed not to have new incoming
808 * FIB notifications and the work queue is free from FIBs
809 * sitting on top of mlxsw netdevs. However, we can still
810 * have other FIBs queued. Flush the queue before flushing
811 * the device's tables. No need for locks, as we're the only
814 mlxsw_core_flush_owq();
815 mlxsw_sp_router_fib_flush(mlxsw_sp
);
816 kfree(mlxsw_sp
->router
->vrs
);
819 struct mlxsw_sp_neigh_key
{
823 struct mlxsw_sp_neigh_entry
{
824 struct list_head rif_list_node
;
825 struct rhash_head ht_node
;
826 struct mlxsw_sp_neigh_key key
;
829 unsigned char ha
[ETH_ALEN
];
830 struct list_head nexthop_list
; /* list of nexthops using
833 struct list_head nexthop_neighs_list_node
;
836 static const struct rhashtable_params mlxsw_sp_neigh_ht_params
= {
837 .key_offset
= offsetof(struct mlxsw_sp_neigh_entry
, key
),
838 .head_offset
= offsetof(struct mlxsw_sp_neigh_entry
, ht_node
),
839 .key_len
= sizeof(struct mlxsw_sp_neigh_key
),
842 static struct mlxsw_sp_neigh_entry
*
843 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp
*mlxsw_sp
, struct neighbour
*n
,
846 struct mlxsw_sp_neigh_entry
*neigh_entry
;
848 neigh_entry
= kzalloc(sizeof(*neigh_entry
), GFP_KERNEL
);
852 neigh_entry
->key
.n
= n
;
853 neigh_entry
->rif
= rif
;
854 INIT_LIST_HEAD(&neigh_entry
->nexthop_list
);
859 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry
*neigh_entry
)
865 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp
*mlxsw_sp
,
866 struct mlxsw_sp_neigh_entry
*neigh_entry
)
868 return rhashtable_insert_fast(&mlxsw_sp
->router
->neigh_ht
,
869 &neigh_entry
->ht_node
,
870 mlxsw_sp_neigh_ht_params
);
874 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp
*mlxsw_sp
,
875 struct mlxsw_sp_neigh_entry
*neigh_entry
)
877 rhashtable_remove_fast(&mlxsw_sp
->router
->neigh_ht
,
878 &neigh_entry
->ht_node
,
879 mlxsw_sp_neigh_ht_params
);
882 static struct mlxsw_sp_neigh_entry
*
883 mlxsw_sp_neigh_entry_create(struct mlxsw_sp
*mlxsw_sp
, struct neighbour
*n
)
885 struct mlxsw_sp_neigh_entry
*neigh_entry
;
886 struct mlxsw_sp_rif
*rif
;
889 rif
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, n
->dev
);
891 return ERR_PTR(-EINVAL
);
893 neigh_entry
= mlxsw_sp_neigh_entry_alloc(mlxsw_sp
, n
, rif
->rif_index
);
895 return ERR_PTR(-ENOMEM
);
897 err
= mlxsw_sp_neigh_entry_insert(mlxsw_sp
, neigh_entry
);
899 goto err_neigh_entry_insert
;
901 list_add(&neigh_entry
->rif_list_node
, &rif
->neigh_list
);
905 err_neigh_entry_insert
:
906 mlxsw_sp_neigh_entry_free(neigh_entry
);
911 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp
*mlxsw_sp
,
912 struct mlxsw_sp_neigh_entry
*neigh_entry
)
914 list_del(&neigh_entry
->rif_list_node
);
915 mlxsw_sp_neigh_entry_remove(mlxsw_sp
, neigh_entry
);
916 mlxsw_sp_neigh_entry_free(neigh_entry
);
919 static struct mlxsw_sp_neigh_entry
*
920 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp
*mlxsw_sp
, struct neighbour
*n
)
922 struct mlxsw_sp_neigh_key key
;
925 return rhashtable_lookup_fast(&mlxsw_sp
->router
->neigh_ht
,
926 &key
, mlxsw_sp_neigh_ht_params
);
930 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp
*mlxsw_sp
)
932 unsigned long interval
= NEIGH_VAR(&arp_tbl
.parms
, DELAY_PROBE_TIME
);
934 mlxsw_sp
->router
->neighs_update
.interval
= jiffies_to_msecs(interval
);
937 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp
*mlxsw_sp
,
941 struct net_device
*dev
;
947 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl
, ent_index
, &rif
, &dip
);
949 if (!mlxsw_sp
->router
->rifs
[rif
]) {
950 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Incorrect RIF in neighbour entry\n");
955 dev
= mlxsw_sp
->router
->rifs
[rif
]->dev
;
956 n
= neigh_lookup(&arp_tbl
, &dipn
, dev
);
958 netdev_err(dev
, "Failed to find matching neighbour for IP=%pI4h\n",
963 netdev_dbg(dev
, "Updating neighbour with IP=%pI4h\n", &dip
);
964 neigh_event_send(n
, NULL
);
968 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp
*mlxsw_sp
,
975 num_entries
= mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl
,
977 /* Hardware starts counting at 0, so add 1. */
980 /* Each record consists of several neighbour entries. */
981 for (i
= 0; i
< num_entries
; i
++) {
984 ent_index
= rec_index
* MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC
+ i
;
985 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp
, rauhtd_pl
,
991 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp
*mlxsw_sp
,
992 char *rauhtd_pl
, int rec_index
)
994 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl
, rec_index
)) {
995 case MLXSW_REG_RAUHTD_TYPE_IPV4
:
996 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp
, rauhtd_pl
,
999 case MLXSW_REG_RAUHTD_TYPE_IPV6
:
1005 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl
)
1007 u8 num_rec
, last_rec_index
, num_entries
;
1009 num_rec
= mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl
);
1010 last_rec_index
= num_rec
- 1;
1012 if (num_rec
< MLXSW_REG_RAUHTD_REC_MAX_NUM
)
1014 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl
, last_rec_index
) ==
1015 MLXSW_REG_RAUHTD_TYPE_IPV6
)
1018 num_entries
= mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl
,
1020 if (++num_entries
== MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC
)
1025 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp
*mlxsw_sp
)
1031 rauhtd_pl
= kmalloc(MLXSW_REG_RAUHTD_LEN
, GFP_KERNEL
);
1035 /* Make sure the neighbour's netdev isn't removed in the
1040 mlxsw_reg_rauhtd_pack(rauhtd_pl
, MLXSW_REG_RAUHTD_TYPE_IPV4
);
1041 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(rauhtd
),
1044 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Failed to dump neighbour talbe\n");
1047 num_rec
= mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl
);
1048 for (i
= 0; i
< num_rec
; i
++)
1049 mlxsw_sp_router_neigh_rec_process(mlxsw_sp
, rauhtd_pl
,
1051 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl
));
1058 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp
*mlxsw_sp
)
1060 struct mlxsw_sp_neigh_entry
*neigh_entry
;
1062 /* Take RTNL mutex here to prevent lists from changes */
1064 list_for_each_entry(neigh_entry
, &mlxsw_sp
->router
->nexthop_neighs_list
,
1065 nexthop_neighs_list_node
)
1066 /* If this neigh have nexthops, make the kernel think this neigh
1067 * is active regardless of the traffic.
1069 neigh_event_send(neigh_entry
->key
.n
, NULL
);
1074 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp
*mlxsw_sp
)
1076 unsigned long interval
= mlxsw_sp
->router
->neighs_update
.interval
;
1078 mlxsw_core_schedule_dw(&mlxsw_sp
->router
->neighs_update
.dw
,
1079 msecs_to_jiffies(interval
));
1082 static void mlxsw_sp_router_neighs_update_work(struct work_struct
*work
)
1084 struct mlxsw_sp_router
*router
;
1087 router
= container_of(work
, struct mlxsw_sp_router
,
1088 neighs_update
.dw
.work
);
1089 err
= mlxsw_sp_router_neighs_update_rauhtd(router
->mlxsw_sp
);
1091 dev_err(router
->mlxsw_sp
->bus_info
->dev
, "Could not update kernel for neigh activity");
1093 mlxsw_sp_router_neighs_update_nh(router
->mlxsw_sp
);
1095 mlxsw_sp_router_neighs_update_work_schedule(router
->mlxsw_sp
);
1098 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct
*work
)
1100 struct mlxsw_sp_neigh_entry
*neigh_entry
;
1101 struct mlxsw_sp_router
*router
;
1103 router
= container_of(work
, struct mlxsw_sp_router
,
1104 nexthop_probe_dw
.work
);
1105 /* Iterate over nexthop neighbours, find those who are unresolved and
1106 * send arp on them. This solves the chicken-egg problem when
1107 * the nexthop wouldn't get offloaded until the neighbor is resolved
1108 * but it wouldn't get resolved ever in case traffic is flowing in HW
1109 * using different nexthop.
1111 * Take RTNL mutex here to prevent lists from changes.
1114 list_for_each_entry(neigh_entry
, &router
->nexthop_neighs_list
,
1115 nexthop_neighs_list_node
)
1116 if (!neigh_entry
->connected
)
1117 neigh_event_send(neigh_entry
->key
.n
, NULL
);
1120 mlxsw_core_schedule_dw(&router
->nexthop_probe_dw
,
1121 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL
);
1125 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp
*mlxsw_sp
,
1126 struct mlxsw_sp_neigh_entry
*neigh_entry
,
1129 static enum mlxsw_reg_rauht_op
mlxsw_sp_rauht_op(bool adding
)
1131 return adding
? MLXSW_REG_RAUHT_OP_WRITE_ADD
:
1132 MLXSW_REG_RAUHT_OP_WRITE_DELETE
;
1136 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp
*mlxsw_sp
,
1137 struct mlxsw_sp_neigh_entry
*neigh_entry
,
1138 enum mlxsw_reg_rauht_op op
)
1140 struct neighbour
*n
= neigh_entry
->key
.n
;
1141 u32 dip
= ntohl(*((__be32
*) n
->primary_key
));
1142 char rauht_pl
[MLXSW_REG_RAUHT_LEN
];
1144 mlxsw_reg_rauht_pack4(rauht_pl
, op
, neigh_entry
->rif
, neigh_entry
->ha
,
1146 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(rauht
), rauht_pl
);
1150 mlxsw_sp_neigh_entry_update(struct mlxsw_sp
*mlxsw_sp
,
1151 struct mlxsw_sp_neigh_entry
*neigh_entry
,
1154 if (!adding
&& !neigh_entry
->connected
)
1156 neigh_entry
->connected
= adding
;
1157 if (neigh_entry
->key
.n
->tbl
== &arp_tbl
)
1158 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp
, neigh_entry
,
1159 mlxsw_sp_rauht_op(adding
));
1164 struct mlxsw_sp_neigh_event_work
{
1165 struct work_struct work
;
1166 struct mlxsw_sp
*mlxsw_sp
;
1167 struct neighbour
*n
;
1170 static void mlxsw_sp_router_neigh_event_work(struct work_struct
*work
)
1172 struct mlxsw_sp_neigh_event_work
*neigh_work
=
1173 container_of(work
, struct mlxsw_sp_neigh_event_work
, work
);
1174 struct mlxsw_sp
*mlxsw_sp
= neigh_work
->mlxsw_sp
;
1175 struct mlxsw_sp_neigh_entry
*neigh_entry
;
1176 struct neighbour
*n
= neigh_work
->n
;
1177 unsigned char ha
[ETH_ALEN
];
1178 bool entry_connected
;
1181 /* If these parameters are changed after we release the lock,
1182 * then we are guaranteed to receive another event letting us
1185 read_lock_bh(&n
->lock
);
1186 memcpy(ha
, n
->ha
, ETH_ALEN
);
1187 nud_state
= n
->nud_state
;
1189 read_unlock_bh(&n
->lock
);
1192 entry_connected
= nud_state
& NUD_VALID
&& !dead
;
1193 neigh_entry
= mlxsw_sp_neigh_entry_lookup(mlxsw_sp
, n
);
1194 if (!entry_connected
&& !neigh_entry
)
1197 neigh_entry
= mlxsw_sp_neigh_entry_create(mlxsw_sp
, n
);
1198 if (IS_ERR(neigh_entry
))
1202 memcpy(neigh_entry
->ha
, ha
, ETH_ALEN
);
1203 mlxsw_sp_neigh_entry_update(mlxsw_sp
, neigh_entry
, entry_connected
);
1204 mlxsw_sp_nexthop_neigh_update(mlxsw_sp
, neigh_entry
, !entry_connected
);
1206 if (!neigh_entry
->connected
&& list_empty(&neigh_entry
->nexthop_list
))
1207 mlxsw_sp_neigh_entry_destroy(mlxsw_sp
, neigh_entry
);
1215 int mlxsw_sp_router_netevent_event(struct notifier_block
*unused
,
1216 unsigned long event
, void *ptr
)
1218 struct mlxsw_sp_neigh_event_work
*neigh_work
;
1219 struct mlxsw_sp_port
*mlxsw_sp_port
;
1220 struct mlxsw_sp
*mlxsw_sp
;
1221 unsigned long interval
;
1222 struct neigh_parms
*p
;
1223 struct neighbour
*n
;
1226 case NETEVENT_DELAY_PROBE_TIME_UPDATE
:
1229 /* We don't care about changes in the default table. */
1230 if (!p
->dev
|| p
->tbl
!= &arp_tbl
)
1233 /* We are in atomic context and can't take RTNL mutex,
1234 * so use RCU variant to walk the device chain.
1236 mlxsw_sp_port
= mlxsw_sp_port_lower_dev_hold(p
->dev
);
1240 mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1241 interval
= jiffies_to_msecs(NEIGH_VAR(p
, DELAY_PROBE_TIME
));
1242 mlxsw_sp
->router
->neighs_update
.interval
= interval
;
1244 mlxsw_sp_port_dev_put(mlxsw_sp_port
);
1246 case NETEVENT_NEIGH_UPDATE
:
1249 if (n
->tbl
!= &arp_tbl
)
1252 mlxsw_sp_port
= mlxsw_sp_port_lower_dev_hold(n
->dev
);
1256 neigh_work
= kzalloc(sizeof(*neigh_work
), GFP_ATOMIC
);
1258 mlxsw_sp_port_dev_put(mlxsw_sp_port
);
1262 INIT_WORK(&neigh_work
->work
, mlxsw_sp_router_neigh_event_work
);
1263 neigh_work
->mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1266 /* Take a reference to ensure the neighbour won't be
1267 * destructed until we drop the reference in delayed
1271 mlxsw_core_schedule_work(&neigh_work
->work
);
1272 mlxsw_sp_port_dev_put(mlxsw_sp_port
);
1279 static int mlxsw_sp_neigh_init(struct mlxsw_sp
*mlxsw_sp
)
1283 err
= rhashtable_init(&mlxsw_sp
->router
->neigh_ht
,
1284 &mlxsw_sp_neigh_ht_params
);
1288 /* Initialize the polling interval according to the default
1291 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp
);
1293 /* Create the delayed works for the activity_update */
1294 INIT_DELAYED_WORK(&mlxsw_sp
->router
->neighs_update
.dw
,
1295 mlxsw_sp_router_neighs_update_work
);
1296 INIT_DELAYED_WORK(&mlxsw_sp
->router
->nexthop_probe_dw
,
1297 mlxsw_sp_router_probe_unresolved_nexthops
);
1298 mlxsw_core_schedule_dw(&mlxsw_sp
->router
->neighs_update
.dw
, 0);
1299 mlxsw_core_schedule_dw(&mlxsw_sp
->router
->nexthop_probe_dw
, 0);
1303 static void mlxsw_sp_neigh_fini(struct mlxsw_sp
*mlxsw_sp
)
1305 cancel_delayed_work_sync(&mlxsw_sp
->router
->neighs_update
.dw
);
1306 cancel_delayed_work_sync(&mlxsw_sp
->router
->nexthop_probe_dw
);
1307 rhashtable_destroy(&mlxsw_sp
->router
->neigh_ht
);
1310 static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp
*mlxsw_sp
,
1311 const struct mlxsw_sp_rif
*rif
)
1313 char rauht_pl
[MLXSW_REG_RAUHT_LEN
];
1315 mlxsw_reg_rauht_pack(rauht_pl
, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL
,
1316 rif
->rif_index
, rif
->addr
);
1317 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(rauht
), rauht_pl
);
1320 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp
*mlxsw_sp
,
1321 struct mlxsw_sp_rif
*rif
)
1323 struct mlxsw_sp_neigh_entry
*neigh_entry
, *tmp
;
1325 mlxsw_sp_neigh_rif_flush(mlxsw_sp
, rif
);
1326 list_for_each_entry_safe(neigh_entry
, tmp
, &rif
->neigh_list
,
1328 mlxsw_sp_neigh_entry_destroy(mlxsw_sp
, neigh_entry
);
1331 struct mlxsw_sp_nexthop_key
{
1332 struct fib_nh
*fib_nh
;
1335 struct mlxsw_sp_nexthop
{
1336 struct list_head neigh_list_node
; /* member of neigh entry list */
1337 struct list_head rif_list_node
;
1338 struct mlxsw_sp_nexthop_group
*nh_grp
; /* pointer back to the group
1341 struct rhash_head ht_node
;
1342 struct mlxsw_sp_nexthop_key key
;
1343 struct mlxsw_sp_rif
*rif
;
1344 u8 should_offload
:1, /* set indicates this neigh is connected and
1345 * should be put to KVD linear area of this group.
1347 offloaded
:1, /* set in case the neigh is actually put into
1348 * KVD linear area of this group.
1350 update
:1; /* set indicates that MAC of this neigh should be
1353 struct mlxsw_sp_neigh_entry
*neigh_entry
;
1356 struct mlxsw_sp_nexthop_group_key
{
1357 struct fib_info
*fi
;
1360 struct mlxsw_sp_nexthop_group
{
1361 struct rhash_head ht_node
;
1362 struct list_head fib_list
; /* list of fib entries that use this group */
1363 struct mlxsw_sp_nexthop_group_key key
;
1364 u8 adj_index_valid
:1,
1365 gateway
:1; /* routes using the group use a gateway */
1369 struct mlxsw_sp_nexthop nexthops
[0];
1370 #define nh_rif nexthops[0].rif
1373 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params
= {
1374 .key_offset
= offsetof(struct mlxsw_sp_nexthop_group
, key
),
1375 .head_offset
= offsetof(struct mlxsw_sp_nexthop_group
, ht_node
),
1376 .key_len
= sizeof(struct mlxsw_sp_nexthop_group_key
),
1379 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp
*mlxsw_sp
,
1380 struct mlxsw_sp_nexthop_group
*nh_grp
)
1382 return rhashtable_insert_fast(&mlxsw_sp
->router
->nexthop_group_ht
,
1384 mlxsw_sp_nexthop_group_ht_params
);
1387 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp
*mlxsw_sp
,
1388 struct mlxsw_sp_nexthop_group
*nh_grp
)
1390 rhashtable_remove_fast(&mlxsw_sp
->router
->nexthop_group_ht
,
1392 mlxsw_sp_nexthop_group_ht_params
);
1395 static struct mlxsw_sp_nexthop_group
*
1396 mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp
*mlxsw_sp
,
1397 struct mlxsw_sp_nexthop_group_key key
)
1399 return rhashtable_lookup_fast(&mlxsw_sp
->router
->nexthop_group_ht
, &key
,
1400 mlxsw_sp_nexthop_group_ht_params
);
1403 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params
= {
1404 .key_offset
= offsetof(struct mlxsw_sp_nexthop
, key
),
1405 .head_offset
= offsetof(struct mlxsw_sp_nexthop
, ht_node
),
1406 .key_len
= sizeof(struct mlxsw_sp_nexthop_key
),
1409 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp
*mlxsw_sp
,
1410 struct mlxsw_sp_nexthop
*nh
)
1412 return rhashtable_insert_fast(&mlxsw_sp
->router
->nexthop_ht
,
1413 &nh
->ht_node
, mlxsw_sp_nexthop_ht_params
);
1416 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp
*mlxsw_sp
,
1417 struct mlxsw_sp_nexthop
*nh
)
1419 rhashtable_remove_fast(&mlxsw_sp
->router
->nexthop_ht
, &nh
->ht_node
,
1420 mlxsw_sp_nexthop_ht_params
);
1423 static struct mlxsw_sp_nexthop
*
1424 mlxsw_sp_nexthop_lookup(struct mlxsw_sp
*mlxsw_sp
,
1425 struct mlxsw_sp_nexthop_key key
)
1427 return rhashtable_lookup_fast(&mlxsw_sp
->router
->nexthop_ht
, &key
,
1428 mlxsw_sp_nexthop_ht_params
);
1431 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp
*mlxsw_sp
,
1432 const struct mlxsw_sp_fib
*fib
,
1433 u32 adj_index
, u16 ecmp_size
,
1437 char raleu_pl
[MLXSW_REG_RALEU_LEN
];
1439 mlxsw_reg_raleu_pack(raleu_pl
,
1440 (enum mlxsw_reg_ralxx_protocol
) fib
->proto
,
1441 fib
->vr
->id
, adj_index
, ecmp_size
, new_adj_index
,
1443 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(raleu
), raleu_pl
);
1446 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp
*mlxsw_sp
,
1447 struct mlxsw_sp_nexthop_group
*nh_grp
,
1448 u32 old_adj_index
, u16 old_ecmp_size
)
1450 struct mlxsw_sp_fib_entry
*fib_entry
;
1451 struct mlxsw_sp_fib
*fib
= NULL
;
1454 list_for_each_entry(fib_entry
, &nh_grp
->fib_list
, nexthop_group_node
) {
1455 if (fib
== fib_entry
->fib_node
->fib
)
1457 fib
= fib_entry
->fib_node
->fib
;
1458 err
= mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp
, fib
,
1469 static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp
*mlxsw_sp
, u32 adj_index
,
1470 struct mlxsw_sp_nexthop
*nh
)
1472 struct mlxsw_sp_neigh_entry
*neigh_entry
= nh
->neigh_entry
;
1473 char ratr_pl
[MLXSW_REG_RATR_LEN
];
1475 mlxsw_reg_ratr_pack(ratr_pl
, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY
,
1476 true, adj_index
, neigh_entry
->rif
);
1477 mlxsw_reg_ratr_eth_entry_pack(ratr_pl
, neigh_entry
->ha
);
1478 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ratr
), ratr_pl
);
1482 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp
*mlxsw_sp
,
1483 struct mlxsw_sp_nexthop_group
*nh_grp
,
1486 u32 adj_index
= nh_grp
->adj_index
; /* base */
1487 struct mlxsw_sp_nexthop
*nh
;
1491 for (i
= 0; i
< nh_grp
->count
; i
++) {
1492 nh
= &nh_grp
->nexthops
[i
];
1494 if (!nh
->should_offload
) {
1499 if (nh
->update
|| reallocate
) {
1500 err
= mlxsw_sp_nexthop_mac_update(mlxsw_sp
,
1512 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp
*mlxsw_sp
,
1513 struct mlxsw_sp_fib_entry
*fib_entry
);
1516 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node
*fib_node
,
1517 const struct mlxsw_sp_fib_entry
*fib_entry
);
1520 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp
*mlxsw_sp
,
1521 struct mlxsw_sp_nexthop_group
*nh_grp
)
1523 struct mlxsw_sp_fib_entry
*fib_entry
;
1526 list_for_each_entry(fib_entry
, &nh_grp
->fib_list
, nexthop_group_node
) {
1527 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry
->fib_node
,
1530 err
= mlxsw_sp_fib_entry_update(mlxsw_sp
, fib_entry
);
1538 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp
*mlxsw_sp
,
1539 struct mlxsw_sp_nexthop_group
*nh_grp
)
1541 struct mlxsw_sp_nexthop
*nh
;
1542 bool offload_change
= false;
1545 bool old_adj_index_valid
;
1551 if (!nh_grp
->gateway
) {
1552 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp
, nh_grp
);
1556 for (i
= 0; i
< nh_grp
->count
; i
++) {
1557 nh
= &nh_grp
->nexthops
[i
];
1559 if (nh
->should_offload
^ nh
->offloaded
) {
1560 offload_change
= true;
1561 if (nh
->should_offload
)
1564 if (nh
->should_offload
)
1567 if (!offload_change
) {
1568 /* Nothing was added or removed, so no need to reallocate. Just
1569 * update MAC on existing adjacency indexes.
1571 err
= mlxsw_sp_nexthop_group_mac_update(mlxsw_sp
, nh_grp
,
1574 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to update neigh MAC in adjacency table.\n");
1580 /* No neigh of this group is connected so we just set
1581 * the trap and let everthing flow through kernel.
1585 err
= mlxsw_sp_kvdl_alloc(mlxsw_sp
, ecmp_size
, &adj_index
);
1587 /* We ran out of KVD linear space, just set the
1588 * trap and let everything flow through kernel.
1590 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to allocate KVD linear area for nexthop group.\n");
1593 old_adj_index_valid
= nh_grp
->adj_index_valid
;
1594 old_adj_index
= nh_grp
->adj_index
;
1595 old_ecmp_size
= nh_grp
->ecmp_size
;
1596 nh_grp
->adj_index_valid
= 1;
1597 nh_grp
->adj_index
= adj_index
;
1598 nh_grp
->ecmp_size
= ecmp_size
;
1599 err
= mlxsw_sp_nexthop_group_mac_update(mlxsw_sp
, nh_grp
, true);
1601 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to update neigh MAC in adjacency table.\n");
1605 if (!old_adj_index_valid
) {
1606 /* The trap was set for fib entries, so we have to call
1607 * fib entry update to unset it and use adjacency index.
1609 err
= mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp
, nh_grp
);
1611 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to add adjacency index to fib entries.\n");
1617 err
= mlxsw_sp_adj_index_mass_update(mlxsw_sp
, nh_grp
,
1618 old_adj_index
, old_ecmp_size
);
1619 mlxsw_sp_kvdl_free(mlxsw_sp
, old_adj_index
);
1621 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to mass-update adjacency index for nexthop group.\n");
1627 old_adj_index_valid
= nh_grp
->adj_index_valid
;
1628 nh_grp
->adj_index_valid
= 0;
1629 for (i
= 0; i
< nh_grp
->count
; i
++) {
1630 nh
= &nh_grp
->nexthops
[i
];
1633 err
= mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp
, nh_grp
);
1635 dev_warn(mlxsw_sp
->bus_info
->dev
, "Failed to set traps for fib entries.\n");
1636 if (old_adj_index_valid
)
1637 mlxsw_sp_kvdl_free(mlxsw_sp
, nh_grp
->adj_index
);
1640 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop
*nh
,
1643 if (!removing
&& !nh
->should_offload
)
1644 nh
->should_offload
= 1;
1645 else if (removing
&& nh
->offloaded
)
1646 nh
->should_offload
= 0;
1651 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp
*mlxsw_sp
,
1652 struct mlxsw_sp_neigh_entry
*neigh_entry
,
1655 struct mlxsw_sp_nexthop
*nh
;
1657 list_for_each_entry(nh
, &neigh_entry
->nexthop_list
,
1659 __mlxsw_sp_nexthop_neigh_update(nh
, removing
);
1660 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh
->nh_grp
);
1664 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop
*nh
,
1665 struct mlxsw_sp_rif
*rif
)
1671 list_add(&nh
->rif_list_node
, &rif
->nexthop_list
);
1674 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop
*nh
)
1679 list_del(&nh
->rif_list_node
);
1683 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp
*mlxsw_sp
,
1684 struct mlxsw_sp_nexthop
*nh
)
1686 struct mlxsw_sp_neigh_entry
*neigh_entry
;
1687 struct fib_nh
*fib_nh
= nh
->key
.fib_nh
;
1688 struct neighbour
*n
;
1692 if (!nh
->nh_grp
->gateway
|| nh
->neigh_entry
)
1695 /* Take a reference of neigh here ensuring that neigh would
1696 * not be detructed before the nexthop entry is finished.
1697 * The reference is taken either in neigh_lookup() or
1698 * in neigh_create() in case n is not found.
1700 n
= neigh_lookup(&arp_tbl
, &fib_nh
->nh_gw
, fib_nh
->nh_dev
);
1702 n
= neigh_create(&arp_tbl
, &fib_nh
->nh_gw
, fib_nh
->nh_dev
);
1705 neigh_event_send(n
, NULL
);
1707 neigh_entry
= mlxsw_sp_neigh_entry_lookup(mlxsw_sp
, n
);
1709 neigh_entry
= mlxsw_sp_neigh_entry_create(mlxsw_sp
, n
);
1710 if (IS_ERR(neigh_entry
)) {
1712 goto err_neigh_entry_create
;
1716 /* If that is the first nexthop connected to that neigh, add to
1717 * nexthop_neighs_list
1719 if (list_empty(&neigh_entry
->nexthop_list
))
1720 list_add_tail(&neigh_entry
->nexthop_neighs_list_node
,
1721 &mlxsw_sp
->router
->nexthop_neighs_list
);
1723 nh
->neigh_entry
= neigh_entry
;
1724 list_add_tail(&nh
->neigh_list_node
, &neigh_entry
->nexthop_list
);
1725 read_lock_bh(&n
->lock
);
1726 nud_state
= n
->nud_state
;
1728 read_unlock_bh(&n
->lock
);
1729 __mlxsw_sp_nexthop_neigh_update(nh
, !(nud_state
& NUD_VALID
&& !dead
));
1733 err_neigh_entry_create
:
1738 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp
*mlxsw_sp
,
1739 struct mlxsw_sp_nexthop
*nh
)
1741 struct mlxsw_sp_neigh_entry
*neigh_entry
= nh
->neigh_entry
;
1742 struct neighbour
*n
;
1746 n
= neigh_entry
->key
.n
;
1748 __mlxsw_sp_nexthop_neigh_update(nh
, true);
1749 list_del(&nh
->neigh_list_node
);
1750 nh
->neigh_entry
= NULL
;
1752 /* If that is the last nexthop connected to that neigh, remove from
1753 * nexthop_neighs_list
1755 if (list_empty(&neigh_entry
->nexthop_list
))
1756 list_del(&neigh_entry
->nexthop_neighs_list_node
);
1758 if (!neigh_entry
->connected
&& list_empty(&neigh_entry
->nexthop_list
))
1759 mlxsw_sp_neigh_entry_destroy(mlxsw_sp
, neigh_entry
);
1764 static int mlxsw_sp_nexthop_init(struct mlxsw_sp
*mlxsw_sp
,
1765 struct mlxsw_sp_nexthop_group
*nh_grp
,
1766 struct mlxsw_sp_nexthop
*nh
,
1767 struct fib_nh
*fib_nh
)
1769 struct net_device
*dev
= fib_nh
->nh_dev
;
1770 struct in_device
*in_dev
;
1771 struct mlxsw_sp_rif
*rif
;
1774 nh
->nh_grp
= nh_grp
;
1775 nh
->key
.fib_nh
= fib_nh
;
1776 err
= mlxsw_sp_nexthop_insert(mlxsw_sp
, nh
);
1783 in_dev
= __in_dev_get_rtnl(dev
);
1784 if (in_dev
&& IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev
) &&
1785 fib_nh
->nh_flags
& RTNH_F_LINKDOWN
)
1788 rif
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, dev
);
1791 mlxsw_sp_nexthop_rif_init(nh
, rif
);
1793 err
= mlxsw_sp_nexthop_neigh_init(mlxsw_sp
, nh
);
1795 goto err_nexthop_neigh_init
;
1799 err_nexthop_neigh_init
:
1800 mlxsw_sp_nexthop_rif_fini(nh
);
1801 mlxsw_sp_nexthop_remove(mlxsw_sp
, nh
);
1805 static void mlxsw_sp_nexthop_fini(struct mlxsw_sp
*mlxsw_sp
,
1806 struct mlxsw_sp_nexthop
*nh
)
1808 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp
, nh
);
1809 mlxsw_sp_nexthop_rif_fini(nh
);
1810 mlxsw_sp_nexthop_remove(mlxsw_sp
, nh
);
1813 static void mlxsw_sp_nexthop_event(struct mlxsw_sp
*mlxsw_sp
,
1814 unsigned long event
, struct fib_nh
*fib_nh
)
1816 struct mlxsw_sp_nexthop_key key
;
1817 struct mlxsw_sp_nexthop
*nh
;
1818 struct mlxsw_sp_rif
*rif
;
1820 if (mlxsw_sp
->router
->aborted
)
1823 key
.fib_nh
= fib_nh
;
1824 nh
= mlxsw_sp_nexthop_lookup(mlxsw_sp
, key
);
1825 if (WARN_ON_ONCE(!nh
))
1828 rif
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, fib_nh
->nh_dev
);
1833 case FIB_EVENT_NH_ADD
:
1834 mlxsw_sp_nexthop_rif_init(nh
, rif
);
1835 mlxsw_sp_nexthop_neigh_init(mlxsw_sp
, nh
);
1837 case FIB_EVENT_NH_DEL
:
1838 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp
, nh
);
1839 mlxsw_sp_nexthop_rif_fini(nh
);
1843 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh
->nh_grp
);
1846 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp
*mlxsw_sp
,
1847 struct mlxsw_sp_rif
*rif
)
1849 struct mlxsw_sp_nexthop
*nh
, *tmp
;
1851 list_for_each_entry_safe(nh
, tmp
, &rif
->nexthop_list
, rif_list_node
) {
1852 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp
, nh
);
1853 mlxsw_sp_nexthop_rif_fini(nh
);
1854 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh
->nh_grp
);
1858 static struct mlxsw_sp_nexthop_group
*
1859 mlxsw_sp_nexthop_group_create(struct mlxsw_sp
*mlxsw_sp
, struct fib_info
*fi
)
1861 struct mlxsw_sp_nexthop_group
*nh_grp
;
1862 struct mlxsw_sp_nexthop
*nh
;
1863 struct fib_nh
*fib_nh
;
1868 alloc_size
= sizeof(*nh_grp
) +
1869 fi
->fib_nhs
* sizeof(struct mlxsw_sp_nexthop
);
1870 nh_grp
= kzalloc(alloc_size
, GFP_KERNEL
);
1872 return ERR_PTR(-ENOMEM
);
1873 INIT_LIST_HEAD(&nh_grp
->fib_list
);
1874 nh_grp
->gateway
= fi
->fib_nh
->nh_scope
== RT_SCOPE_LINK
;
1875 nh_grp
->count
= fi
->fib_nhs
;
1876 nh_grp
->key
.fi
= fi
;
1878 for (i
= 0; i
< nh_grp
->count
; i
++) {
1879 nh
= &nh_grp
->nexthops
[i
];
1880 fib_nh
= &fi
->fib_nh
[i
];
1881 err
= mlxsw_sp_nexthop_init(mlxsw_sp
, nh_grp
, nh
, fib_nh
);
1883 goto err_nexthop_init
;
1885 err
= mlxsw_sp_nexthop_group_insert(mlxsw_sp
, nh_grp
);
1887 goto err_nexthop_group_insert
;
1888 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh_grp
);
1891 err_nexthop_group_insert
:
1893 for (i
--; i
>= 0; i
--) {
1894 nh
= &nh_grp
->nexthops
[i
];
1895 mlxsw_sp_nexthop_fini(mlxsw_sp
, nh
);
1897 fib_info_put(nh_grp
->key
.fi
);
1899 return ERR_PTR(err
);
1903 mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp
*mlxsw_sp
,
1904 struct mlxsw_sp_nexthop_group
*nh_grp
)
1906 struct mlxsw_sp_nexthop
*nh
;
1909 mlxsw_sp_nexthop_group_remove(mlxsw_sp
, nh_grp
);
1910 for (i
= 0; i
< nh_grp
->count
; i
++) {
1911 nh
= &nh_grp
->nexthops
[i
];
1912 mlxsw_sp_nexthop_fini(mlxsw_sp
, nh
);
1914 mlxsw_sp_nexthop_group_refresh(mlxsw_sp
, nh_grp
);
1915 WARN_ON_ONCE(nh_grp
->adj_index_valid
);
1916 fib_info_put(nh_grp
->key
.fi
);
1920 static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp
*mlxsw_sp
,
1921 struct mlxsw_sp_fib_entry
*fib_entry
,
1922 struct fib_info
*fi
)
1924 struct mlxsw_sp_nexthop_group_key key
;
1925 struct mlxsw_sp_nexthop_group
*nh_grp
;
1928 nh_grp
= mlxsw_sp_nexthop_group_lookup(mlxsw_sp
, key
);
1930 nh_grp
= mlxsw_sp_nexthop_group_create(mlxsw_sp
, fi
);
1932 return PTR_ERR(nh_grp
);
1934 list_add_tail(&fib_entry
->nexthop_group_node
, &nh_grp
->fib_list
);
1935 fib_entry
->nh_group
= nh_grp
;
1939 static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp
*mlxsw_sp
,
1940 struct mlxsw_sp_fib_entry
*fib_entry
)
1942 struct mlxsw_sp_nexthop_group
*nh_grp
= fib_entry
->nh_group
;
1944 list_del(&fib_entry
->nexthop_group_node
);
1945 if (!list_empty(&nh_grp
->fib_list
))
1947 mlxsw_sp_nexthop_group_destroy(mlxsw_sp
, nh_grp
);
1951 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry
*fib_entry
)
1953 struct mlxsw_sp_nexthop_group
*nh_group
= fib_entry
->nh_group
;
1955 if (fib_entry
->params
.tos
)
1958 switch (fib_entry
->type
) {
1959 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE
:
1960 return !!nh_group
->adj_index_valid
;
1961 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
:
1962 return !!nh_group
->nh_rif
;
1968 static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry
*fib_entry
)
1970 fib_entry
->offloaded
= true;
1972 switch (fib_entry
->fib_node
->fib
->proto
) {
1973 case MLXSW_SP_L3_PROTO_IPV4
:
1974 fib_info_offload_inc(fib_entry
->nh_group
->key
.fi
);
1976 case MLXSW_SP_L3_PROTO_IPV6
:
1982 mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry
*fib_entry
)
1984 switch (fib_entry
->fib_node
->fib
->proto
) {
1985 case MLXSW_SP_L3_PROTO_IPV4
:
1986 fib_info_offload_dec(fib_entry
->nh_group
->key
.fi
);
1988 case MLXSW_SP_L3_PROTO_IPV6
:
1992 fib_entry
->offloaded
= false;
1996 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry
*fib_entry
,
1997 enum mlxsw_reg_ralue_op op
, int err
)
2000 case MLXSW_REG_RALUE_OP_WRITE_DELETE
:
2001 if (!fib_entry
->offloaded
)
2003 return mlxsw_sp_fib_entry_offload_unset(fib_entry
);
2004 case MLXSW_REG_RALUE_OP_WRITE_WRITE
:
2007 if (mlxsw_sp_fib_entry_should_offload(fib_entry
) &&
2008 !fib_entry
->offloaded
)
2009 mlxsw_sp_fib_entry_offload_set(fib_entry
);
2010 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry
) &&
2011 fib_entry
->offloaded
)
2012 mlxsw_sp_fib_entry_offload_unset(fib_entry
);
2019 static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp
*mlxsw_sp
,
2020 struct mlxsw_sp_fib_entry
*fib_entry
,
2021 enum mlxsw_reg_ralue_op op
)
2023 char ralue_pl
[MLXSW_REG_RALUE_LEN
];
2024 struct mlxsw_sp_fib
*fib
= fib_entry
->fib_node
->fib
;
2025 u32
*p_dip
= (u32
*) fib_entry
->fib_node
->key
.addr
;
2026 enum mlxsw_reg_ralue_trap_action trap_action
;
2028 u32 adjacency_index
= 0;
2031 /* In case the nexthop group adjacency index is valid, use it
2032 * with provided ECMP size. Otherwise, setup trap and pass
2033 * traffic to kernel.
2035 if (mlxsw_sp_fib_entry_should_offload(fib_entry
)) {
2036 trap_action
= MLXSW_REG_RALUE_TRAP_ACTION_NOP
;
2037 adjacency_index
= fib_entry
->nh_group
->adj_index
;
2038 ecmp_size
= fib_entry
->nh_group
->ecmp_size
;
2040 trap_action
= MLXSW_REG_RALUE_TRAP_ACTION_TRAP
;
2041 trap_id
= MLXSW_TRAP_ID_RTR_INGRESS0
;
2044 mlxsw_reg_ralue_pack4(ralue_pl
,
2045 (enum mlxsw_reg_ralxx_protocol
) fib
->proto
, op
,
2046 fib
->vr
->id
, fib_entry
->fib_node
->key
.prefix_len
,
2048 mlxsw_reg_ralue_act_remote_pack(ralue_pl
, trap_action
, trap_id
,
2049 adjacency_index
, ecmp_size
);
2050 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralue
), ralue_pl
);
2053 static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp
*mlxsw_sp
,
2054 struct mlxsw_sp_fib_entry
*fib_entry
,
2055 enum mlxsw_reg_ralue_op op
)
2057 struct mlxsw_sp_rif
*rif
= fib_entry
->nh_group
->nh_rif
;
2058 struct mlxsw_sp_fib
*fib
= fib_entry
->fib_node
->fib
;
2059 enum mlxsw_reg_ralue_trap_action trap_action
;
2060 char ralue_pl
[MLXSW_REG_RALUE_LEN
];
2061 u32
*p_dip
= (u32
*) fib_entry
->fib_node
->key
.addr
;
2065 if (mlxsw_sp_fib_entry_should_offload(fib_entry
)) {
2066 trap_action
= MLXSW_REG_RALUE_TRAP_ACTION_NOP
;
2067 rif_index
= rif
->rif_index
;
2069 trap_action
= MLXSW_REG_RALUE_TRAP_ACTION_TRAP
;
2070 trap_id
= MLXSW_TRAP_ID_RTR_INGRESS0
;
2073 mlxsw_reg_ralue_pack4(ralue_pl
,
2074 (enum mlxsw_reg_ralxx_protocol
) fib
->proto
, op
,
2075 fib
->vr
->id
, fib_entry
->fib_node
->key
.prefix_len
,
2077 mlxsw_reg_ralue_act_local_pack(ralue_pl
, trap_action
, trap_id
,
2079 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralue
), ralue_pl
);
2082 static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp
*mlxsw_sp
,
2083 struct mlxsw_sp_fib_entry
*fib_entry
,
2084 enum mlxsw_reg_ralue_op op
)
2086 struct mlxsw_sp_fib
*fib
= fib_entry
->fib_node
->fib
;
2087 char ralue_pl
[MLXSW_REG_RALUE_LEN
];
2088 u32
*p_dip
= (u32
*) fib_entry
->fib_node
->key
.addr
;
2090 mlxsw_reg_ralue_pack4(ralue_pl
,
2091 (enum mlxsw_reg_ralxx_protocol
) fib
->proto
, op
,
2092 fib
->vr
->id
, fib_entry
->fib_node
->key
.prefix_len
,
2094 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl
);
2095 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ralue
), ralue_pl
);
2098 static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp
*mlxsw_sp
,
2099 struct mlxsw_sp_fib_entry
*fib_entry
,
2100 enum mlxsw_reg_ralue_op op
)
2102 switch (fib_entry
->type
) {
2103 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE
:
2104 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp
, fib_entry
, op
);
2105 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
:
2106 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp
, fib_entry
, op
);
2107 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP
:
2108 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp
, fib_entry
, op
);
2113 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp
*mlxsw_sp
,
2114 struct mlxsw_sp_fib_entry
*fib_entry
,
2115 enum mlxsw_reg_ralue_op op
)
2119 switch (fib_entry
->fib_node
->fib
->proto
) {
2120 case MLXSW_SP_L3_PROTO_IPV4
:
2121 err
= mlxsw_sp_fib_entry_op4(mlxsw_sp
, fib_entry
, op
);
2123 case MLXSW_SP_L3_PROTO_IPV6
:
2126 mlxsw_sp_fib_entry_offload_refresh(fib_entry
, op
, err
);
2130 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp
*mlxsw_sp
,
2131 struct mlxsw_sp_fib_entry
*fib_entry
)
2133 return mlxsw_sp_fib_entry_op(mlxsw_sp
, fib_entry
,
2134 MLXSW_REG_RALUE_OP_WRITE_WRITE
);
2137 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp
*mlxsw_sp
,
2138 struct mlxsw_sp_fib_entry
*fib_entry
)
2140 return mlxsw_sp_fib_entry_op(mlxsw_sp
, fib_entry
,
2141 MLXSW_REG_RALUE_OP_WRITE_DELETE
);
2145 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp
*mlxsw_sp
,
2146 const struct fib_entry_notifier_info
*fen_info
,
2147 struct mlxsw_sp_fib_entry
*fib_entry
)
2149 struct fib_info
*fi
= fen_info
->fi
;
2151 switch (fen_info
->type
) {
2152 case RTN_BROADCAST
: /* fall through */
2154 fib_entry
->type
= MLXSW_SP_FIB_ENTRY_TYPE_TRAP
;
2156 case RTN_UNREACHABLE
: /* fall through */
2157 case RTN_BLACKHOLE
: /* fall through */
2159 /* Packets hitting these routes need to be trapped, but
2160 * can do so with a lower priority than packets directed
2161 * at the host, so use action type local instead of trap.
2163 fib_entry
->type
= MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
;
2166 if (fi
->fib_nh
->nh_scope
!= RT_SCOPE_LINK
)
2167 fib_entry
->type
= MLXSW_SP_FIB_ENTRY_TYPE_LOCAL
;
2169 fib_entry
->type
= MLXSW_SP_FIB_ENTRY_TYPE_REMOTE
;
2176 static struct mlxsw_sp_fib_entry
*
2177 mlxsw_sp_fib4_entry_create(struct mlxsw_sp
*mlxsw_sp
,
2178 struct mlxsw_sp_fib_node
*fib_node
,
2179 const struct fib_entry_notifier_info
*fen_info
)
2181 struct mlxsw_sp_fib_entry
*fib_entry
;
2184 fib_entry
= kzalloc(sizeof(*fib_entry
), GFP_KERNEL
);
2187 goto err_fib_entry_alloc
;
2190 err
= mlxsw_sp_fib4_entry_type_set(mlxsw_sp
, fen_info
, fib_entry
);
2192 goto err_fib4_entry_type_set
;
2194 err
= mlxsw_sp_nexthop_group_get(mlxsw_sp
, fib_entry
, fen_info
->fi
);
2196 goto err_nexthop_group_get
;
2198 fib_entry
->params
.prio
= fen_info
->fi
->fib_priority
;
2199 fib_entry
->params
.tb_id
= fen_info
->tb_id
;
2200 fib_entry
->params
.type
= fen_info
->type
;
2201 fib_entry
->params
.tos
= fen_info
->tos
;
2203 fib_entry
->fib_node
= fib_node
;
2207 err_nexthop_group_get
:
2208 err_fib4_entry_type_set
:
2210 err_fib_entry_alloc
:
2211 return ERR_PTR(err
);
2214 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp
*mlxsw_sp
,
2215 struct mlxsw_sp_fib_entry
*fib_entry
)
2217 mlxsw_sp_nexthop_group_put(mlxsw_sp
, fib_entry
);
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info);

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node))
		return NULL;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id == fen_info->tb_id &&
		    fib_entry->params.tos == fen_info->tos &&
		    fib_entry->params.type == fen_info->type &&
		    fib_entry->nh_group->key.fi == fen_info->fi) {
			return fib_entry;
		}
	}

	return NULL;
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

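/* FIB nodes are hashed on the whole mlxsw_sp_fib_key, i.e. {addr,
 * prefix_len}. rhashtable compares the key as raw bytes, so lookup keys
 * must be zero-initialized before the address is copied in, which is why
 * mlxsw_sp_fib_node_lookup() below memsets the key first. For example, a
 * /24 IPv4 node would be looked up with:
 *
 *	fib_node = mlxsw_sp_fib_node_lookup(fib, &dst, sizeof(dst), 24);
 *
 * where dst holds the destination network, as done from
 * mlxsw_sp_fib4_node_get() with fen_info->dst.
 */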
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	INIT_LIST_HEAD(&fib_node->entry_list);
	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}

static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}

static bool
mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
				 const struct mlxsw_sp_fib_entry *fib_entry)
{
	return list_first_entry(&fib_node->entry_list,
				struct mlxsw_sp_fib_entry, list) == fib_entry;
}

static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (fib->prefix_ref_count[prefix_len]++ == 0)
		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
}

static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (--fib->prefix_ref_count[prefix_len] == 0)
		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}

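/* Each FIB keeps a reference count per prefix length. The first node
 * with a given prefix length marks it as used in the FIB's prefix usage
 * and the last one clears it. A change in prefix usage may require
 * binding the virtual router to an LPM tree with a matching structure,
 * which is what mlxsw_sp_fib_node_init() and mlxsw_sp_fib_node_fini()
 * below arrange via mlxsw_sp_vr_lpm_tree_check().
 */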
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);

	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
						 &req_prefix_usage);
		if (err)
			goto err_tree_check;
	} else {
		lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
						 fib->proto);
		if (IS_ERR(lpm_tree))
			return PTR_ERR(lpm_tree);
		fib->lpm_tree = lpm_tree;
		err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
		if (err)
			goto err_tree_bind;
	}

	mlxsw_sp_fib_node_prefix_inc(fib_node);

	return 0;

err_tree_bind:
	fib->lpm_tree = NULL;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_tree_check:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}

static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}

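/* Entries within a FIB node are kept sorted by decreasing table ID,
 * then decreasing TOS and then increasing priority.
 * mlxsw_sp_fib4_node_entry_find() returns the entry the new entry
 * should be inserted before, or NULL when only the table ID ordering
 * determines the insertion point.
 */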
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id > params->tb_id)
			continue;
		if (fib_entry->params.tb_id != params->tb_id)
			break;
		if (fib_entry->params.tos > params->tos)
			continue;
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}

static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	fib_node = fib_entry->fib_node;
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	list_add_tail(&new_entry->list, &fib_entry->list);

	return 0;
}

static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		struct mlxsw_sp_fib_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}

static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}

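/* Only the first (best) entry of a FIB node is offloaded to the device;
 * the rest are shadowed by it. mlxsw_sp_fib4_node_entry_add() and
 * mlxsw_sp_fib4_node_entry_del() below therefore only touch the
 * hardware when the first entry is affected, overwriting the previously
 * offloaded entry in place instead of deleting and re-adding it, so
 * that no packets are dropped in between.
 */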
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}

static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
					     append);
	if (err)
		return err;

	err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
	if (err)
		goto err_fib4_node_entry_add;

	return 0;

err_fib4_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib_entry);
	return err;
}

static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}

static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	struct mlxsw_sp_fib_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib_entry, list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}

static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib_entry))
		return;
	fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}

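/* When offloading fails, the driver enters an "aborted" mode: offloaded
 * routes are flushed and a catch-all route trapping packets to the CPU
 * is installed in every active virtual router, handing forwarding back
 * to the kernel. mlxsw_sp_router_set_abort_trap() below programs that
 * default (/0) route with an ip2me action using the minimal LPM tree.
 */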
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}

static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	}
}

static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}

struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
};

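/* FIB events are delivered in atomic context, so they are not processed
 * in place. Instead, the notifier copies the notifier info into a
 * mlxsw_sp_fib_event_work item, takes the references needed to keep it
 * alive and defers the actual processing to process context, where it
 * runs under RTNL.
 */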
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;

	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (mlxsw_sp->router->rifs[i] &&
		    mlxsw_sp->router->rifs[i]->dev == dev)
			return mlxsw_sp->router->rifs[i];

	return NULL;
}

static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}

static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
				       const struct in_device *in_dev,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		return rif == NULL;
	case NETDEV_DOWN:
		if (rif && !in_dev->ifa_list &&
		    !netif_is_l3_slave(rif->dev))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or a LAG
		 */
		break;
	}

	return false;
}

static enum mlxsw_sp_rif_type
mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *dev)
{
	enum mlxsw_sp_fid_type type;

	/* RIF type is derived from the type of the underlying FID */
	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev))
		type = MLXSW_SP_FID_TYPE_8021D;
	else
		type = MLXSW_SP_FID_TYPE_RFID;

	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}

static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
		if (!mlxsw_sp->router->rifs[i]) {
			*p_rif_index = i;
			return 0;
		}
	}

	return -ENOBUFS;
}

static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	ether_addr_copy(rif->addr, l3_dev->dev_addr);
	rif->mtu = l3_dev->mtu;
	rif->vr_id = vr_id;
	rif->dev = l3_dev;
	rif->rif_index = rif_index;

	return rif;
}

struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}

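/* RIF objects are allocated by common code, but their size is dictated
 * by the specific RIF type via ops->rif_size. This lets a type such as
 * the sub-port RIF embed struct mlxsw_sp_rif as its first member and
 * recover the container with container_of(), as done in
 * mlxsw_sp_rif_subport_rif() further below.
 */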
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err)
		goto err_rif_index_alloc;

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	fid = ops->fid_get(rif);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	rif->fid = fid;

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif);
	if (err)
		goto err_configure;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
				  mlxsw_sp_fid_index(fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_rif_counters_alloc(rif);
	mlxsw_sp_fid_rif_set(fid, rif);
	mlxsw_sp->router->rifs[rif_index] = rif;
	vr->rif_count++;

	return rif;

err_rif_fdb_op:
	ops->deconfigure(rif);
err_configure:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	vr->rif_count--;
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_counters_free(rif);
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	ops->deconfigure(rif);
	mlxsw_sp_fid_put(fid);
	kfree(rif);
	mlxsw_sp_vr_put(vr);
}

static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;

	params->vid = mlxsw_sp_port_vlan->vid;
	params->lag = mlxsw_sp_port->lagged;
	if (params->lag)
		params->lag_id = mlxsw_sp_port->lag_id;
	else
		params->system_port = mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		struct mlxsw_sp_rif_params params = {
			.dev = l3_dev,
		};

		mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	return err;
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	/* If router port holds the last reference on the rFID, then the
	 * associated Sub-port RIF will be destroyed.
	 */
	mlxsw_sp_fid_put(fid);
}

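/* The inetaddr event handlers below fan out according to the netdev
 * type: physical ports and LAG devices join the router through a
 * {port, VLAN} sub-port RIF, while bridges and VLAN devices on top of
 * VLAN-aware bridges get a RIF backed by the corresponding FID.
 */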
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
						      l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) ||
	    netif_is_lag_port(port_dev) ||
	    netif_is_ovs_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, event);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event);
	else
		return 0;
}

int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event);
out:
	return notifier_from_errno(err);
}

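/* MAC and MTU changes on a netdev backing a RIF are propagated by
 * mlxsw_sp_netdevice_router_port_event() below: the RIF's FDB entry is
 * removed, the RITR register is rewritten with the new parameters and a
 * new FDB entry is installed, with each step rolled back if a later one
 * fails.
 */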
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	u16 fid_index;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);

	return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	if (!mlxsw_sp)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		return 0;
	case NETDEV_CHANGEUPPER:
		if (info->linking)
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
		else
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		break;
	}

	return err;
}

static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu,
			    rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_subport_op(rif, true);
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

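/* VLAN and FID RIFs are both programmed through RITR FID-based
 * interface types and share mlxsw_sp_rif_vlan_fid_op() below. They
 * differ only in the RITR interface type (VLAN vs. FID) and in the
 * backing FID: an 802.1Q FID keyed by VID or an 802.1D FID keyed by the
 * bridge's ifindex.
 */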
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	return 0;

err_fid_bc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);

	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
{
	u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
};

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	return 0;

err_fid_bc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);

	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

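/* Router initialization below builds the layers bottom-up: global
 * router enablement (RGCR), RIFs, nexthop and nexthop group hash
 * tables, LPM trees, virtual routers and neighbour tracking, and only
 * then registers the FIB notifier that starts feeding route events into
 * the above machinery. Teardown runs strictly in reverse.
 */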
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}