/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
	struct notifier_block fib_nb;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
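
/* Example (hypothetical caller, not part of this file): allocating and
 * then reading an egress RIF counter could look like this:
 *
 *	u64 cnt;
 *	int err;
 *
 *	err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
 *					 MLXSW_SP_RIF_COUNTER_EGRESS);
 *	if (!err)
 *		err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *						     MLXSW_SP_RIF_COUNTER_EGRESS,
 *						     &cnt);
 */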

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
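
/* A prefix usage is the set of prefix lengths in use by a FIB. The helpers
 * below compare such sets; they are what decides whether an already
 * allocated LPM tree can serve a FIB or a new tree must be carved out.
 */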

static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_params {
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
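
/* Program the tree's left structure: iterating the used prefix lengths in
 * ascending order, each bin is linked to the previously written one
 * (starting from "no child"), and the longest used prefix length ends up
 * as the root bin passed to the RALST pack.
 */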
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local table into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->tb_id = tb_id;
	return vr;
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}
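
/* Check whether the FIB's current LPM tree satisfies the requested prefix
 * usage and, if not, migrate the virtual router to a tree that does. The
 * new tree is bound before the old one is released, so lookups never see
 * an unbound router.
 */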
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
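
/* Neighbour handling: the hardware's host table activity is periodically
 * dumped (RAUHTD) so the kernel keeps active entries resolved, and kernel
 * neighbour events are mirrored into the device's host table (RAUHT) from
 * delayed work, since netevent notifiers run in atomic context.
 */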
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
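
/* A RAUHTD response holds at most MLXSW_REG_RAUHTD_REC_MAX_NUM records.
 * Tell whether the dump may have filled the buffer, in which case another
 * query round is needed to drain the remaining entries.
 */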
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	char *rauhtd_pl;
	u8 num_rec;
	int i, err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl == &arp_tbl)
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	else
		WARN_ON_ONCE(1);
}

struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);
	kfree(neigh_work);
}

int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_rif *rif)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
			     rif->rif_index, rif->addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
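
/* Nexthop and ECMP handling: each nexthop group with a gateway owns a
 * contiguous block of adjacency entries in the KVD linear area, one per
 * offloadable nexthop. Remote FIB entries point at the block's base index
 * and size; groups whose nexthops are all unresolved fall back to
 * trapping traffic to the kernel.
 */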
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};

struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	struct mlxsw_sp_nexthop_group_key key;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif	nexthops[0].rif
};

static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};

static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}

static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}

static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};

static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}

static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}

static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}
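
/* Refresh a nexthop group after a nexthop changed state: recount the
 * nexthops that should be offloaded, allocate a fresh block of adjacency
 * entries if the set changed, write the neighbour MACs, and then either
 * switch the group's FIB entries from trap to adjacency or mass-update
 * existing entries to the new block. Any failure degrades the group to
 * trapping traffic to the kernel.
 */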
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int i;
	int err;

	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}

static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
					    bool removing)
{
	if (!removing && !nh->should_offload)
		nh->should_offload = 1;
	else if (removing && nh->offloaded)
		nh->should_offload = 0;
	nh->update = 1;
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}

static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}

static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct fib_nh *fib_nh = nh->key.fib_nh;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}

static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}

static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop_group *nh_grp,
				 struct mlxsw_sp_nexthop *nh,
				 struct fib_nh *fib_nh)
{
	struct net_device *dev = fib_nh->nh_dev;
	struct in_device *in_dev;
	struct mlxsw_sp_rif *rif;
	int err;

	nh->nh_grp = nh_grp;
	nh->key.fib_nh = fib_nh;
	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
	if (err)
		return err;

	if (!dev)
		return 0;

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}

static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
				   unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *rif;

	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!rif)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, rif);
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}

static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group_key key;
	struct mlxsw_sp_nexthop_group *nh_grp;

	key.fi = fi;
	nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	if (fib_entry->params.tos)
		return false;

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	default:
		return false;
	}
}

static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	fib_entry->offloaded = true;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_inc(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
	}
}

static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_dec(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
	}

	fib_entry->offloaded = false;
}

static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		if (!fib_entry->offloaded)
			return;
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
		    !fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
			 fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}

static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = -EINVAL;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		return err;
	}
	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
	return err;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}

static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_alloc;
	}

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop_group_get;

	fib_entry->params.prio = fen_info->fi->fib_priority;
	fib_entry->params.tb_id = fen_info->tb_id;
	fib_entry->params.type = fen_info->type;
	fib_entry->params.tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib_entry;

err_nexthop_group_get:
err_fib4_entry_type_set:
	kfree(fib_entry);
err_fib_entry_alloc:
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info);

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node))
		return NULL;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id == fen_info->tb_id &&
		    fib_entry->params.tos == fen_info->tos &&
		    fib_entry->params.type == fen_info->type &&
		    fib_entry->nh_group->key.fi == fen_info->fi) {
			return fib_entry;
		}
	}

	return NULL;
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	INIT_LIST_HEAD(&fib_node->entry_list);
	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}

static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
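/* Only the first entry on a node's entry list is offloaded to the device;
 * the helper below lets the add/del paths decide whether hardware must be
 * touched at all.
 */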
static bool
mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
				 const struct mlxsw_sp_fib_entry *fib_entry)
{
	return list_first_entry(&fib_node->entry_list,
				struct mlxsw_sp_fib_entry, list) == fib_entry;
}
static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (fib->prefix_ref_count[prefix_len]++ == 0)
		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
}

static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (--fib->prefix_ref_count[prefix_len] == 0)
		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}
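/* Binding a node into a FIB may require a different LPM tree: the requested
 * prefix usage is the FIB's current usage plus the node's prefix length,
 * and the tree is either re-checked or newly allocated and bound below.
 */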
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);

	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
						 &req_prefix_usage);
		if (err)
			goto err_tree_check;
	} else {
		lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
						 fib->proto);
		if (IS_ERR(lpm_tree))
			return PTR_ERR(lpm_tree);
		fib->lpm_tree = lpm_tree;
		err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
		if (err)
			goto err_tree_bind;
	}

	mlxsw_sp_fib_node_prefix_inc(fib_node);

	return 0;

err_tree_bind:
	fib->lpm_tree = NULL;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_tree_check:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
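/* Dropping the reference taken above: a node and its virtual router are
 * only torn down once the node's entry list is empty.
 */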
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id > params->tb_id)
			continue;
		if (fib_entry->params.tb_id != params->tb_id)
			break;
		if (fib_entry->params.tos > params->tos)
			continue;
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}
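/* Within a node, entries are kept ordered by tb_id (descending), then tos
 * (descending), then ascending priority; the find helper above returns the
 * insertion point used by the append/insert helpers below.
 */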
static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	fib_node = fib_entry->fib_node;
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	list_add_tail(&new_entry->list, &fib_entry->list);
	return 0;
}
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		struct mlxsw_sp_fib_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
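/* Linking couples the two steps above: insert the entry into the node's
 * sorted list, then program the device if the entry became the first one.
 */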
static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
					     append);
	if (err)
		return err;

	err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
	if (err)
		goto err_fib4_node_entry_add;

	return 0;

err_fib4_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib_entry);
	return err;
}

static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}
static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	struct mlxsw_sp_fib_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib_entry, list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}
static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib_entry))
		return;
	fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
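/* The abort path below programs, per used virtual router, a default
 * catch-all route whose action is to trap packets to the CPU, so traffic
 * keeps flowing through the kernel once offload is given up.
 */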
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	}
}
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
};
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;

	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
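/* Router interface (RIF) handling starts here; RIFs are indexed in the
 * router's rifs array and looked up by their backing netdevice.
 */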
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (mlxsw_sp->router->rifs[i] &&
		    mlxsw_sp->router->rifs[i]->dev == dev)
			return mlxsw_sp->router->rifs[i];

	return NULL;
}

static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
				       const struct in_device *in_dev,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		if (!rif)
			return true;
		break;
	case NETDEV_DOWN:
		if (rif && !in_dev->ifa_list &&
		    !netif_is_l3_slave(rif->dev))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		break;
	}

	return false;
}
#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (!mlxsw_sp->router->rifs[i])
			return i;

	return MLXSW_SP_INVALID_INDEX_RIF;
}

static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
					   bool *p_lagged, u16 *p_system_port)
{
	u8 local_port = mlxsw_sp_vport->local_port;

	*p_lagged = mlxsw_sp_vport->lagged;
	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}
static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    u16 vr_id, struct net_device *l3_dev,
				    u16 rif_index, bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
			    vr_id, l3_dev->mtu, l3_dev->dev_addr);

	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
{
	return MLXSW_SP_RFID_BASE + rif_index;
}
static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->leave = mlxsw_sp_vport_rif_sp_leave;
	f->ref_count = 0;
	f->dev = l3_dev;
	f->fid = fid;

	return f;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
		   struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(sizeof(*rif), GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	ether_addr_copy(rif->addr, l3_dev->dev_addr);
	rif->mtu = l3_dev->mtu;
	rif->vr_id = vr_id;
	rif->dev = l3_dev;
	rif->rif_index = rif_index;
	rif->f = f;

	return rif;
}
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *rif;
	u16 fid, rif_index;
	int err;

	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
		return ERR_PTR(-ERANGE);

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
				       rif_index, true);
	if (err)
		goto err_vport_rif_sp_op;

	fid = mlxsw_sp_rif_sp_to_fid(rif_index);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
						MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
		err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
						 MLXSW_SP_RIF_COUNTER_EGRESS);
		if (err)
			netdev_dbg(mlxsw_sp_vport->dev,
				   "Counter alloc Failed err=%d\n", err);
	}

	f->rif = rif;
	mlxsw_sp->router->rifs[rif_index] = rif;
	vr->rif_count++;

	return rif;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
				 false);
err_vport_rif_sp_op:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;
	u16 fid = f->fid;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);

	vr->rif_count--;
	mlxsw_sp->router->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
				 false);
	mlxsw_sp_vr_put(vr);
}
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_vport, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port = mlxsw_sp_vport_port(mlxsw_sp_vport);
	if (mlxsw_sp_port->nr_port_vid_map++ == 0) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
	rif->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);

	return 0;

err_port_vp_mode_trans:
	mlxsw_sp_port->nr_port_vid_map--;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_vport, vid, BR_STATE_BLOCKING);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	if (rif->f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, rif);
	return err;
}
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct mlxsw_sp_port *mlxsw_sp_port;

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	f->ref_count--;
	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);

	mlxsw_sp_port = mlxsw_sp_vport_port(mlxsw_sp_vport);
	if (mlxsw_sp_port->nr_port_vid_map == 1)
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
	mlxsw_sp_port->nr_port_vid_map--;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_vport, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);

	if (f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
}
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) ||
	    netif_is_lag_port(port_dev) ||
	    netif_is_ovs_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp_master_bridge(mlxsw_sp)->dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}

static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
{
	return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
	       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
}

static u16 mlxsw_sp_flood_table_index_get(u16 fid)
{
	return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
}
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
					  bool set)
{
	u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
	enum mlxsw_flood_table_type table_type;
	char *sftr_pl;
	u16 index;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	table_type = mlxsw_sp_flood_table_type_get(fid);
	index = mlxsw_sp_flood_table_index_get(fid);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
			    1, router_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}

static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return MLXSW_REG_RITR_FID_IF;
	else
		return MLXSW_REG_RITR_VLAN_IF;
}
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
		return -ERANGE;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		goto err_port_flood_set;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
				     rif_index, true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->rif = rif;
	mlxsw_sp->router->rifs[rif_index] = rif;
	vr->rif_count++;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
err_port_flood_set:
	mlxsw_sp_vr_put(vr);
	return err;
}
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	vr->rif_count--;
	mlxsw_sp->router->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	mlxsw_sp_vr_put(vr);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
		break;
	}

	return 0;
}
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp_master_bridge(mlxsw_sp)->dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event);
	else
		return 0;
}
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event);
out:
	return notifier_from_errno(err);
}
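/* A RIF's MAC and MTU are edited in place below by re-issuing the RITR
 * register with the create opcode; the caller first unlearns the old FDB
 * entry and re-learns the new one around the edit.
 */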
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
	return err;
}
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);

	return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	if (!mlxsw_sp)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		return 0;
	case NETDEV_CHANGEUPPER:
		if (info->upper_dev)
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
		else
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		break;
	}

	return err;
}
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;

	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
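/* Init below mirrors the teardown order in mlxsw_sp_router_fini(); each
 * error label unwinds exactly the steps that succeeded before it.
 */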
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}