1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
39#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
42#include <linux/notifier.h>
43#include <linux/inetdevice.h>
44#include <linux/netdevice.h>
45#include <linux/if_bridge.h>
46#include <net/netevent.h>
47#include <net/neighbour.h>
48#include <net/arp.h>
49#include <net/ip_fib.h>
50#include <net/fib_rules.h>
51#include <net/l3mdev.h>
52#include <net/addrconf.h>
53#include <net/ndisc.h>
54#include <net/ipv6.h>
55
56#include "spectrum.h"
57#include "core.h"
58#include "reg.h"
59#include "spectrum_cnt.h"
60#include "spectrum_dpipe.h"
61#include "spectrum_router.h"
62
63struct mlxsw_sp_vr;
64struct mlxsw_sp_lpm_tree;
65struct mlxsw_sp_rif_ops;
66
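/* Per-ASIC router state (summary comment added for readability): the RIF and
 * virtual-router arrays, the neighbour/nexthop rhashtables, the LPM tree pool,
 * the periodic neighbour-activity and nexthop-probe delayed works, the FIB
 * notifier block and the "aborted" flag set once offload falls back to
 * trapping everything to the kernel.
 */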
67struct mlxsw_sp_router {
68 struct mlxsw_sp *mlxsw_sp;
69 struct mlxsw_sp_rif **rifs;
70 struct mlxsw_sp_vr *vrs;
71 struct rhashtable neigh_ht;
72 struct rhashtable nexthop_group_ht;
73 struct rhashtable nexthop_ht;
74 struct {
75 struct mlxsw_sp_lpm_tree *trees;
76 unsigned int tree_count;
77 } lpm;
78 struct {
79 struct delayed_work dw;
80 unsigned long interval; /* ms */
81 } neighs_update;
82 struct delayed_work nexthop_probe_dw;
83#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
84 struct list_head nexthop_neighs_list;
85 bool aborted;
86 struct notifier_block fib_nb;
87 const struct mlxsw_sp_rif_ops **rif_ops_arr;
88};
89
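/* Router InterFace (RIF): the L3 interface the device associates with a
 * kernel netdev. It tracks the nexthops and neighbour entries using it,
 * the backing FID, and optional ingress/egress packet counters.
 */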
90struct mlxsw_sp_rif {
91 struct list_head nexthop_list;
92 struct list_head neigh_list;
93 struct net_device *dev;
94 struct mlxsw_sp_fid *fid;
95 unsigned char addr[ETH_ALEN];
96 int mtu;
97 u16 rif_index;
98 u16 vr_id;
99 const struct mlxsw_sp_rif_ops *ops;
100 struct mlxsw_sp *mlxsw_sp;
101
102 unsigned int counter_ingress;
103 bool counter_ingress_valid;
104 unsigned int counter_egress;
105 bool counter_egress_valid;
106};
107
108struct mlxsw_sp_rif_params {
109 struct net_device *dev;
110 union {
111 u16 system_port;
112 u16 lag_id;
113 };
114 u16 vid;
115 bool lag;
116};
117
118struct mlxsw_sp_rif_subport {
119 struct mlxsw_sp_rif common;
120 union {
121 u16 system_port;
122 u16 lag_id;
123 };
124 u16 vid;
125 bool lag;
126};
127
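/* Per-RIF-type operations: each RIF flavour supplies its own setup,
 * configure/deconfigure and FID lookup callbacks, selected through the
 * router's rif_ops_arr.
 */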
128struct mlxsw_sp_rif_ops {
129 enum mlxsw_sp_rif_type type;
130 size_t rif_size;
131
132 void (*setup)(struct mlxsw_sp_rif *rif,
133 const struct mlxsw_sp_rif_params *params);
134 int (*configure)(struct mlxsw_sp_rif *rif);
135 void (*deconfigure)(struct mlxsw_sp_rif *rif);
136 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
137};
138
139static unsigned int *
140mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
141 enum mlxsw_sp_rif_counter_dir dir)
142{
143 switch (dir) {
144 case MLXSW_SP_RIF_COUNTER_EGRESS:
145 return &rif->counter_egress;
146 case MLXSW_SP_RIF_COUNTER_INGRESS:
147 return &rif->counter_ingress;
148 }
149 return NULL;
150}
151
152static bool
153mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
154 enum mlxsw_sp_rif_counter_dir dir)
155{
156 switch (dir) {
157 case MLXSW_SP_RIF_COUNTER_EGRESS:
158 return rif->counter_egress_valid;
159 case MLXSW_SP_RIF_COUNTER_INGRESS:
160 return rif->counter_ingress_valid;
161 }
162 return false;
163}
164
165static void
166mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
167 enum mlxsw_sp_rif_counter_dir dir,
168 bool valid)
169{
170 switch (dir) {
171 case MLXSW_SP_RIF_COUNTER_EGRESS:
172 rif->counter_egress_valid = valid;
173 break;
174 case MLXSW_SP_RIF_COUNTER_INGRESS:
175 rif->counter_ingress_valid = valid;
176 break;
177 }
178}
179
180static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
181 unsigned int counter_index, bool enable,
182 enum mlxsw_sp_rif_counter_dir dir)
183{
184 char ritr_pl[MLXSW_REG_RITR_LEN];
185 bool is_egress = false;
186 int err;
187
188 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
189 is_egress = true;
190 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
191 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
192 if (err)
193 return err;
194
195 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
196 is_egress);
197 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
198}
199
200int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
201 struct mlxsw_sp_rif *rif,
202 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
203{
204 char ricnt_pl[MLXSW_REG_RICNT_LEN];
205 unsigned int *p_counter_index;
206 bool valid;
207 int err;
208
209 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
210 if (!valid)
211 return -EINVAL;
212
213 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
214 if (!p_counter_index)
215 return -EINVAL;
216 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
217 MLXSW_REG_RICNT_OPCODE_NOP);
218 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
219 if (err)
220 return err;
221 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
222 return 0;
223}
224
225static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
226 unsigned int counter_index)
227{
228 char ricnt_pl[MLXSW_REG_RICNT_LEN];
229
230 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
231 MLXSW_REG_RICNT_OPCODE_CLEAR);
232 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
233}
234
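/* Illustrative use of the RIF counter helpers in this file (example added
 * here, not taken from the original code): allocate an egress counter,
 * read it, then release it.
 *
 *	u64 packets;
 *
 *	if (!mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
 *					MLXSW_SP_RIF_COUNTER_EGRESS)) {
 *		mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					       MLXSW_SP_RIF_COUNTER_EGRESS,
 *					       &packets);
 *		mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
 *					  MLXSW_SP_RIF_COUNTER_EGRESS);
 *	}
 */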
235int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
236 struct mlxsw_sp_rif *rif,
237 enum mlxsw_sp_rif_counter_dir dir)
238{
239 unsigned int *p_counter_index;
240 int err;
241
242 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
243 if (!p_counter_index)
244 return -EINVAL;
245 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
246 p_counter_index);
247 if (err)
248 return err;
249
250 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
251 if (err)
252 goto err_counter_clear;
253
254 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
255 *p_counter_index, true, dir);
256 if (err)
257 goto err_counter_edit;
258 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
259 return 0;
260
261err_counter_edit:
262err_counter_clear:
263 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
264 *p_counter_index);
265 return err;
266}
267
268void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
269 struct mlxsw_sp_rif *rif,
270 enum mlxsw_sp_rif_counter_dir dir)
271{
272 unsigned int *p_counter_index;
273
274 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
275 return;
276
277 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
278 if (WARN_ON(!p_counter_index))
279 return;
280 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
281 *p_counter_index, false, dir);
282 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
283 *p_counter_index);
284 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
285}
286
287static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
288{
289 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
290 struct devlink *devlink;
291
292 devlink = priv_to_devlink(mlxsw_sp->core);
293 if (!devlink_dpipe_table_counter_enabled(devlink,
294 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
295 return;
296 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
297}
298
299static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
300{
301 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
302
303 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
304}
305
306static struct mlxsw_sp_rif *
307mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
308 const struct net_device *dev);
309
310#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
311
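/* Bitmap of the prefix lengths present in a FIB (0-128, sized for IPv6).
 * For example, a table holding only 10.0.0.0/8 and 10.1.0.0/16 routes
 * would have bits 8 and 16 set. LPM trees are built from and compared by
 * this usage.
 */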
312struct mlxsw_sp_prefix_usage {
313 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
314};
315
316#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
317 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
318
319static bool
320mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
321 struct mlxsw_sp_prefix_usage *prefix_usage2)
322{
323 unsigned char prefix;
324
325 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
326 if (!test_bit(prefix, prefix_usage2->b))
327 return false;
328 }
329 return true;
330}
331
332static bool
333mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
334 struct mlxsw_sp_prefix_usage *prefix_usage2)
335{
336 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
337}
338
339static bool
340mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
341{
342 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
343
344 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
345}
346
347static void
348mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
349 struct mlxsw_sp_prefix_usage *prefix_usage2)
350{
351 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
352}
353
354static void
355mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
356 unsigned char prefix_len)
357{
358 set_bit(prefix_len, prefix_usage->b);
359}
360
361static void
362mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
363 unsigned char prefix_len)
364{
365 clear_bit(prefix_len, prefix_usage->b);
366}
367
368struct mlxsw_sp_fib_key {
369 unsigned char addr[sizeof(struct in6_addr)];
370 unsigned char prefix_len;
371};
372
373enum mlxsw_sp_fib_entry_type {
374 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
375 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
376 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
377};
378
379struct mlxsw_sp_nexthop_group;
380struct mlxsw_sp_fib;
381
382struct mlxsw_sp_fib_node {
383 struct list_head entry_list;
384 struct list_head list;
385 struct rhash_head ht_node;
386 struct mlxsw_sp_fib *fib;
387 struct mlxsw_sp_fib_key key;
388};
389
390struct mlxsw_sp_fib_entry_params {
391 u32 tb_id;
392 u32 prio;
393 u8 tos;
394 u8 type;
395};
396
397struct mlxsw_sp_fib_entry {
398 struct list_head list;
399 struct mlxsw_sp_fib_node *fib_node;
400 enum mlxsw_sp_fib_entry_type type;
401 struct list_head nexthop_group_node;
402 struct mlxsw_sp_nexthop_group *nh_group;
403 struct mlxsw_sp_fib_entry_params params;
404 bool offloaded;
405};
406
407enum mlxsw_sp_l3proto {
408 MLXSW_SP_L3_PROTO_IPV4,
409 MLXSW_SP_L3_PROTO_IPV6,
410};
411
412struct mlxsw_sp_lpm_tree {
413 u8 id; /* tree ID */
414 unsigned int ref_count;
415 enum mlxsw_sp_l3proto proto;
416 struct mlxsw_sp_prefix_usage prefix_usage;
417};
418
419struct mlxsw_sp_fib {
420 struct rhashtable ht;
421 struct list_head node_list;
422 struct mlxsw_sp_vr *vr;
423 struct mlxsw_sp_lpm_tree *lpm_tree;
424 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
425 struct mlxsw_sp_prefix_usage prefix_usage;
426 enum mlxsw_sp_l3proto proto;
427};
428
429struct mlxsw_sp_vr {
430 u16 id; /* virtual router ID */
431 u32 tb_id; /* kernel fib table id */
432 unsigned int rif_count;
433 struct mlxsw_sp_fib *fib4;
434 struct mlxsw_sp_fib *fib6;
435};
436
437static const struct rhashtable_params mlxsw_sp_fib_ht_params;
438
439static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
440 enum mlxsw_sp_l3proto proto)
441{
442 struct mlxsw_sp_fib *fib;
443 int err;
444
445 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
446 if (!fib)
447 return ERR_PTR(-ENOMEM);
448 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
449 if (err)
450 goto err_rhashtable_init;
451 INIT_LIST_HEAD(&fib->node_list);
452 fib->proto = proto;
453 fib->vr = vr;
454 return fib;
455
456err_rhashtable_init:
457 kfree(fib);
458 return ERR_PTR(err);
459}
460
461static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
462{
463 WARN_ON(!list_empty(&fib->node_list));
464 WARN_ON(fib->lpm_tree);
465 rhashtable_destroy(&fib->ht);
466 kfree(fib);
467}
468
469static struct mlxsw_sp_lpm_tree *
470mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
471{
472 struct mlxsw_sp_lpm_tree *lpm_tree;
473 int i;
474
475 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
476 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
477 if (lpm_tree->ref_count == 0)
478 return lpm_tree;
479 }
480 return NULL;
481}
482
483static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
484 struct mlxsw_sp_lpm_tree *lpm_tree)
485{
486 char ralta_pl[MLXSW_REG_RALTA_LEN];
487
488 mlxsw_reg_ralta_pack(ralta_pl, true,
489 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
490 lpm_tree->id);
491 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
492}
493
494static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
495 struct mlxsw_sp_lpm_tree *lpm_tree)
496{
497 char ralta_pl[MLXSW_REG_RALTA_LEN];
498
499 mlxsw_reg_ralta_pack(ralta_pl, false,
500 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
501 lpm_tree->id);
502 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
503}
504
505static int
506mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
507 struct mlxsw_sp_prefix_usage *prefix_usage,
508 struct mlxsw_sp_lpm_tree *lpm_tree)
509{
510 char ralst_pl[MLXSW_REG_RALST_LEN];
511 u8 root_bin = 0;
512 u8 prefix;
513 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
514
515 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
516 root_bin = prefix;
517
518 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
519 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
520 if (prefix == 0)
521 continue;
522 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
523 MLXSW_REG_RALST_BIN_NO_CHILD);
524 last_prefix = prefix;
525 }
526 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
527}
528
529static struct mlxsw_sp_lpm_tree *
530mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
531 struct mlxsw_sp_prefix_usage *prefix_usage,
532 enum mlxsw_sp_l3proto proto)
533{
534 struct mlxsw_sp_lpm_tree *lpm_tree;
535 int err;
536
537 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
538 if (!lpm_tree)
539 return ERR_PTR(-EBUSY);
540 lpm_tree->proto = proto;
541 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
542 if (err)
543 return ERR_PTR(err);
544
545 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
546 lpm_tree);
547 if (err)
548 goto err_left_struct_set;
549 memcpy(&lpm_tree->prefix_usage, prefix_usage,
550 sizeof(lpm_tree->prefix_usage));
551 return lpm_tree;
552
553err_left_struct_set:
554 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
555 return ERR_PTR(err);
556}
557
558static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
559 struct mlxsw_sp_lpm_tree *lpm_tree)
560{
561 return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
562}
563
564static struct mlxsw_sp_lpm_tree *
565mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
566 struct mlxsw_sp_prefix_usage *prefix_usage,
567 enum mlxsw_sp_l3proto proto)
568{
569 struct mlxsw_sp_lpm_tree *lpm_tree;
570 int i;
571
572 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
573 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
574 if (lpm_tree->ref_count != 0 &&
575 lpm_tree->proto == proto &&
576 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
577 prefix_usage))
578 goto inc_ref_count;
579 }
580 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
581 proto);
582 if (IS_ERR(lpm_tree))
583 return lpm_tree;
584
585inc_ref_count:
586 lpm_tree->ref_count++;
587 return lpm_tree;
588}
589
590static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
591 struct mlxsw_sp_lpm_tree *lpm_tree)
592{
593 if (--lpm_tree->ref_count == 0)
594 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
595 return 0;
596}
597
598#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
599
600static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
601{
602 struct mlxsw_sp_lpm_tree *lpm_tree;
603 u64 max_trees;
604 int i;
605
606 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
607 return -EIO;
608
609 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
610 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
611 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
612 sizeof(struct mlxsw_sp_lpm_tree),
613 GFP_KERNEL);
614 if (!mlxsw_sp->router->lpm.trees)
615 return -ENOMEM;
616
617 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
618 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
619 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
620 }
621
622 return 0;
623}
624
625static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
626{
627 kfree(mlxsw_sp->router->lpm.trees);
628}
629
630static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
631{
632 return !!vr->fib4 || !!vr->fib6;
633}
634
635static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
636{
637 struct mlxsw_sp_vr *vr;
638 int i;
639
640 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
641 vr = &mlxsw_sp->router->vrs[i];
642 if (!mlxsw_sp_vr_is_used(vr))
643 return vr;
644 }
645 return NULL;
646}
647
648static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
649 const struct mlxsw_sp_fib *fib)
650{
651 char raltb_pl[MLXSW_REG_RALTB_LEN];
652
653 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
654 (enum mlxsw_reg_ralxx_protocol) fib->proto,
655 fib->lpm_tree->id);
656 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
657}
658
659static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
660 const struct mlxsw_sp_fib *fib)
661{
662 char raltb_pl[MLXSW_REG_RALTB_LEN];
663
664 /* Bind to tree 0 which is default */
665 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
666 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
667 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
668}
669
670static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
671{
672 /* For our purpose, squash main and local table into one */
673 if (tb_id == RT_TABLE_LOCAL)
674 tb_id = RT_TABLE_MAIN;
675 return tb_id;
676}
677
678static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
679 u32 tb_id)
680{
681 struct mlxsw_sp_vr *vr;
682 int i;
683
684 tb_id = mlxsw_sp_fix_tb_id(tb_id);
685
686 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
687 vr = &mlxsw_sp->router->vrs[i];
688 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
689 return vr;
690 }
691 return NULL;
692}
693
694static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
695 enum mlxsw_sp_l3proto proto)
696{
697 switch (proto) {
698 case MLXSW_SP_L3_PROTO_IPV4:
699 return vr->fib4;
700 case MLXSW_SP_L3_PROTO_IPV6:
701 return vr->fib6;
702 }
703 return NULL;
704}
705
706static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
707 u32 tb_id)
708{
709 struct mlxsw_sp_vr *vr;
710 int err;
711
712 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
713 if (!vr)
714 return ERR_PTR(-EBUSY);
715 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
716 if (IS_ERR(vr->fib4))
717 return ERR_CAST(vr->fib4);
718 vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
719 if (IS_ERR(vr->fib6)) {
720 err = PTR_ERR(vr->fib6);
721 goto err_fib6_create;
722 }
723 vr->tb_id = tb_id;
724 return vr;
725
726err_fib6_create:
727 mlxsw_sp_fib_destroy(vr->fib4);
728 vr->fib4 = NULL;
729 return ERR_PTR(err);
730}
731
732static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
733{
734 mlxsw_sp_fib_destroy(vr->fib6);
735 vr->fib6 = NULL;
736 mlxsw_sp_fib_destroy(vr->fib4);
737 vr->fib4 = NULL;
738}
739
740static int
741mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
742 struct mlxsw_sp_prefix_usage *req_prefix_usage)
743{
744 struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
745 struct mlxsw_sp_lpm_tree *new_tree;
746 int err;
747
748 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
749 return 0;
750
751 new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
752 fib->proto);
753 if (IS_ERR(new_tree)) {
754 /* We failed to get a tree according to the required
755 * prefix usage. However, the current tree might still be
756 * good for us if our requirement is a subset of the
757 * prefixes used in the tree.
758 */
759 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
760 &lpm_tree->prefix_usage))
761 return 0;
762 return PTR_ERR(new_tree);
763 }
764
765 /* Prevent packet loss by overwriting existing binding */
766 fib->lpm_tree = new_tree;
767 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
768 if (err)
769 goto err_tree_bind;
770 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
771
772 return 0;
773
774err_tree_bind:
775 fib->lpm_tree = lpm_tree;
776 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
777 return err;
778}
779
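/* Look up the virtual router bound to the given kernel table (local is
 * squashed into main first) and create one, with fresh IPv4 and IPv6
 * FIBs, if none exists yet.
 */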
780static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
781{
782 struct mlxsw_sp_vr *vr;
783
784 tb_id = mlxsw_sp_fix_tb_id(tb_id);
785 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
786 if (!vr)
787 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
788 return vr;
789}
790
791static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
792{
793 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
794 list_empty(&vr->fib6->node_list))
795 mlxsw_sp_vr_destroy(vr);
796}
797
798static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
799{
800 struct mlxsw_sp_vr *vr;
801 u64 max_vrs;
802 int i;
803
804 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
805 return -EIO;
806
807 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
808 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
809 GFP_KERNEL);
810 if (!mlxsw_sp->router->vrs)
811 return -ENOMEM;
812
813 for (i = 0; i < max_vrs; i++) {
814 vr = &mlxsw_sp->router->vrs[i];
815 vr->id = i;
816 }
817
818 return 0;
819}
820
821static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
822
823static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
824{
825 /* At this stage we're guaranteed not to have new incoming
826 * FIB notifications and the work queue is free from FIBs
827 * sitting on top of mlxsw netdevs. However, we can still
828 * have other FIBs queued. Flush the queue before flushing
829 * the device's tables. No need for locks, as we're the only
830 * writer.
831 */
832 mlxsw_core_flush_owq();
833 mlxsw_sp_router_fib_flush(mlxsw_sp);
834 kfree(mlxsw_sp->router->vrs);
835}
836
837struct mlxsw_sp_neigh_key {
838 struct neighbour *n;
839};
840
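/* A neighbour mirrored to the device: keyed by the kernel neighbour,
 * bound to a RIF, and programmed to the RAUHT table while "connected".
 * Nexthops referencing it hang off nexthop_list.
 */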
841struct mlxsw_sp_neigh_entry {
842 struct list_head rif_list_node;
843 struct rhash_head ht_node;
844 struct mlxsw_sp_neigh_key key;
845 u16 rif;
846 bool connected;
847 unsigned char ha[ETH_ALEN];
848 struct list_head nexthop_list; /* list of nexthops using
849 * this neigh entry
850 */
851 struct list_head nexthop_neighs_list_node;
852};
853
854static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
855 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
856 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
857 .key_len = sizeof(struct mlxsw_sp_neigh_key),
858};
859
860static struct mlxsw_sp_neigh_entry *
861mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
862 u16 rif)
863{
864 struct mlxsw_sp_neigh_entry *neigh_entry;
865
866 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
867 if (!neigh_entry)
868 return NULL;
869
870 neigh_entry->key.n = n;
871 neigh_entry->rif = rif;
872 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
873
874 return neigh_entry;
875}
876
877static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
878{
879 kfree(neigh_entry);
880}
881
882static int
883mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
884 struct mlxsw_sp_neigh_entry *neigh_entry)
885{
886 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
887 &neigh_entry->ht_node,
888 mlxsw_sp_neigh_ht_params);
889}
890
891static void
892mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
893 struct mlxsw_sp_neigh_entry *neigh_entry)
894{
895 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
896 &neigh_entry->ht_node,
897 mlxsw_sp_neigh_ht_params);
898}
899
900static struct mlxsw_sp_neigh_entry *
901mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
902{
903 struct mlxsw_sp_neigh_entry *neigh_entry;
904 struct mlxsw_sp_rif *rif;
905 int err;
906
907 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
908 if (!rif)
909 return ERR_PTR(-EINVAL);
910
911 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
912 if (!neigh_entry)
913 return ERR_PTR(-ENOMEM);
914
915 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
916 if (err)
917 goto err_neigh_entry_insert;
918
919 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
920
921 return neigh_entry;
922
923err_neigh_entry_insert:
924 mlxsw_sp_neigh_entry_free(neigh_entry);
925 return ERR_PTR(err);
926}
927
928static void
929mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
930 struct mlxsw_sp_neigh_entry *neigh_entry)
931{
932 list_del(&neigh_entry->rif_list_node);
933 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
934 mlxsw_sp_neigh_entry_free(neigh_entry);
935}
936
937static struct mlxsw_sp_neigh_entry *
938mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
939{
940 struct mlxsw_sp_neigh_key key;
941
942 key.n = n;
943 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
944 &key, mlxsw_sp_neigh_ht_params);
945}
946
947static void
948mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
949{
950 unsigned long interval;
951
952 interval = min_t(unsigned long,
953 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
954 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
955 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
956}
957
958static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
959 char *rauhtd_pl,
960 int ent_index)
961{
962 struct net_device *dev;
963 struct neighbour *n;
964 __be32 dipn;
965 u32 dip;
966 u16 rif;
967
968 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
969
970 if (!mlxsw_sp->router->rifs[rif]) {
971 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
972 return;
973 }
974
975 dipn = htonl(dip);
976 dev = mlxsw_sp->router->rifs[rif]->dev;
977 n = neigh_lookup(&arp_tbl, &dipn, dev);
978 if (!n) {
979 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
980 &dip);
981 return;
982 }
983
984 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
985 neigh_event_send(n, NULL);
986 neigh_release(n);
987}
988
989static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
990 char *rauhtd_pl,
991 int rec_index)
992{
993 struct net_device *dev;
994 struct neighbour *n;
995 struct in6_addr dip;
996 u16 rif;
997
998 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
999 (char *) &dip);
1000
1001 if (!mlxsw_sp->router->rifs[rif]) {
1002 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1003 return;
1004 }
1005
1006 dev = mlxsw_sp->router->rifs[rif]->dev;
1007 n = neigh_lookup(&nd_tbl, &dip, dev);
1008 if (!n) {
1009 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1010 &dip);
1011 return;
1012 }
1013
1014 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1015 neigh_event_send(n, NULL);
1016 neigh_release(n);
1017}
1018
1019static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1020 char *rauhtd_pl,
1021 int rec_index)
1022{
1023 u8 num_entries;
1024 int i;
1025
1026 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1027 rec_index);
1028 /* Hardware starts counting at 0, so add 1. */
1029 num_entries++;
1030
1031 /* Each record consists of several neighbour entries. */
1032 for (i = 0; i < num_entries; i++) {
1033 int ent_index;
1034
1035 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1036 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1037 ent_index);
1038 }
1039
1040}
1041
1042static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1043 char *rauhtd_pl,
1044 int rec_index)
1045{
1046 /* One record contains one entry. */
1047 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1048 rec_index);
1049}
1050
1051static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1052 char *rauhtd_pl, int rec_index)
1053{
1054 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1055 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1056 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1057 rec_index);
1058 break;
1059 case MLXSW_REG_RAUHTD_TYPE_IPV6:
1060 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1061 rec_index);
1062 break;
1063 }
1064}
1065
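/* The RAUHTD dump is considered full when its last record is completely
 * used: for IPv6 a record holds a single entry, while an IPv4 record
 * must have all MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries populated. A
 * full dump means more activity records may still be pending.
 */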
1066static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1067{
1068 u8 num_rec, last_rec_index, num_entries;
1069
1070 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1071 last_rec_index = num_rec - 1;
1072
1073 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1074 return false;
1075 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1076 MLXSW_REG_RAUHTD_TYPE_IPV6)
1077 return true;
1078
1079 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1080 last_rec_index);
1081 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1082 return true;
1083 return false;
1084}
1085
1086static int
1087__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1088 char *rauhtd_pl,
1089 enum mlxsw_reg_rauhtd_type type)
1090{
1091 int i, num_rec;
1092 int err;
1093
1094 /* Make sure the neighbour's netdev isn't removed in the
1095 * process.
1096 */
1097 rtnl_lock();
1098 do {
1099 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
1100 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1101 rauhtd_pl);
1102 if (err) {
1103 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
1104 break;
1105 }
1106 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1107 for (i = 0; i < num_rec; i++)
1108 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1109 i);
1110 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
1111 rtnl_unlock();
1112
1113 return err;
1114}
1115
1116static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1117{
1118 enum mlxsw_reg_rauhtd_type type;
1119 char *rauhtd_pl;
1120 int err;
1121
1122 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1123 if (!rauhtd_pl)
1124 return -ENOMEM;
1125
1126 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1127 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1128 if (err)
1129 goto out;
1130
1131 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1132 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1133out:
1134 kfree(rauhtd_pl);
1135 return err;
1136}
1137
1138static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1139{
1140 struct mlxsw_sp_neigh_entry *neigh_entry;
1141
1142 /* Take RTNL mutex here to prevent lists from changes */
1143 rtnl_lock();
1144 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
1145 nexthop_neighs_list_node)
1146 /* If this neigh has nexthops, make the kernel think this neigh
1147 * is active regardless of the traffic.
1148 */
1149 neigh_event_send(neigh_entry->key.n, NULL);
1150 rtnl_unlock();
1151}
1152
1153static void
1154mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1155{
1156 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
1157
1158 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
1159 msecs_to_jiffies(interval));
1160}
1161
1162static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1163{
1164 struct mlxsw_sp_router *router;
1165 int err;
1166
1167 router = container_of(work, struct mlxsw_sp_router,
1168 neighs_update.dw.work);
1169 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
1170 if (err)
1171 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
1172
1173 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
1174
1175 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
1176}
1177
1178static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1179{
1180 struct mlxsw_sp_neigh_entry *neigh_entry;
1181 struct mlxsw_sp_router *router;
1182
1183 router = container_of(work, struct mlxsw_sp_router,
1184 nexthop_probe_dw.work);
0b2361d9
YG
1185 /* Iterate over nexthop neighbours, find those who are unresolved and
1186 * send ARP on them. This solves the chicken-and-egg problem where
1187 * the nexthop wouldn't get offloaded until the neighbour is resolved,
1188 * but it wouldn't get resolved ever in case traffic is flowing in HW
1189 * using a different nexthop.
1190 *
1191 * Take RTNL mutex here to prevent lists from changes.
1192 */
1193 rtnl_lock();
1194 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
1195 nexthop_neighs_list_node)
1196 if (!neigh_entry->connected)
1197 neigh_event_send(neigh_entry->key.n, NULL);
1198 rtnl_unlock();
1199
1200 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
1201 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1202}
1203
1204static void
1205mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1206 struct mlxsw_sp_neigh_entry *neigh_entry,
1207 bool removing);
1208
1209static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
1210{
1211 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1212 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1213}
1214
1215static void
1216mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1217 struct mlxsw_sp_neigh_entry *neigh_entry,
1218 enum mlxsw_reg_rauht_op op)
1219{
1220 struct neighbour *n = neigh_entry->key.n;
1221 u32 dip = ntohl(*((__be32 *) n->primary_key));
1222 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1223
1224 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1225 dip);
1226 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1227}
1228
d5eb89cf
AS
1229static void
1230mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
1231 struct mlxsw_sp_neigh_entry *neigh_entry,
1232 enum mlxsw_reg_rauht_op op)
1233{
1234 struct neighbour *n = neigh_entry->key.n;
1235 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1236 const char *dip = n->primary_key;
1237
1238 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1239 dip);
1240 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1241}
1242
1243static bool mlxsw_sp_neigh_ipv6_ignore(struct neighbour *n)
1244{
1245 /* Packets with a link-local destination address are trapped
1246 * after LPM lookup and never reach the neighbour table, so
1247 * there is no need to program such neighbours to the device.
1248 */
1249 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
1250 IPV6_ADDR_LINKLOCAL)
1251 return true;
1252 return false;
1253}
1254
1255static void
1256mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1257 struct mlxsw_sp_neigh_entry *neigh_entry,
1258 bool adding)
1259{
1260 if (!adding && !neigh_entry->connected)
1261 return;
1262 neigh_entry->connected = adding;
1263 if (neigh_entry->key.n->tbl == &arp_tbl) {
1264 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
1265 mlxsw_sp_rauht_op(adding));
1266 } else if (neigh_entry->key.n->tbl == &nd_tbl) {
1267 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry->key.n))
1268 return;
1269 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
1270 mlxsw_sp_rauht_op(adding));
1271 } else {
1272 WARN_ON_ONCE(1);
1273 }
1274}
1275
1276struct mlxsw_sp_neigh_event_work {
1277 struct work_struct work;
1278 struct mlxsw_sp *mlxsw_sp;
1279 struct neighbour *n;
1280};
1281
1282static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
1283{
1284 struct mlxsw_sp_neigh_event_work *neigh_work =
1285 container_of(work, struct mlxsw_sp_neigh_event_work, work);
1286 struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
1287 struct mlxsw_sp_neigh_entry *neigh_entry;
1288 struct neighbour *n = neigh_work->n;
1289 unsigned char ha[ETH_ALEN];
1290 bool entry_connected;
1291 u8 nud_state, dead;
1292
1293 /* If these parameters are changed after we release the lock,
1294 * then we are guaranteed to receive another event letting us
1295 * know about it.
1296 */
1297 read_lock_bh(&n->lock);
1298 memcpy(ha, n->ha, ETH_ALEN);
1299 nud_state = n->nud_state;
1300 dead = n->dead;
1301 read_unlock_bh(&n->lock);
1302
1303 rtnl_lock();
1304 entry_connected = nud_state & NUD_VALID && !dead;
1305 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1306 if (!entry_connected && !neigh_entry)
1307 goto out;
1308 if (!neigh_entry) {
1309 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1310 if (IS_ERR(neigh_entry))
1311 goto out;
1312 }
1313
1314 memcpy(neigh_entry->ha, ha, ETH_ALEN);
1315 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
1316 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
1317
1318 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1319 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1320
1321out:
1322 rtnl_unlock();
1323 neigh_release(n);
1324 kfree(neigh_work);
1325}
1326
1327int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
1328 unsigned long event, void *ptr)
1329{
1330 struct mlxsw_sp_neigh_event_work *neigh_work;
1331 struct mlxsw_sp_port *mlxsw_sp_port;
1332 struct mlxsw_sp *mlxsw_sp;
1333 unsigned long interval;
1334 struct neigh_parms *p;
1335 struct neighbour *n;
1336
1337 switch (event) {
1338 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
1339 p = ptr;
1340
1341 /* We don't care about changes in the default table. */
1342 if (!p->dev || (p->tbl != &arp_tbl && p->tbl != &nd_tbl))
1343 return NOTIFY_DONE;
1344
1345 /* We are in atomic context and can't take RTNL mutex,
1346 * so use RCU variant to walk the device chain.
1347 */
1348 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
1349 if (!mlxsw_sp_port)
1350 return NOTIFY_DONE;
1351
1352 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1353 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
1354 mlxsw_sp->router->neighs_update.interval = interval;
1355
1356 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1357 break;
1358 case NETEVENT_NEIGH_UPDATE:
1359 n = ptr;
1360
1361 if (n->tbl != &arp_tbl && n->tbl != &nd_tbl)
1362 return NOTIFY_DONE;
1363
1364 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
1365 if (!mlxsw_sp_port)
1366 return NOTIFY_DONE;
1367
1368 neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
1369 if (!neigh_work) {
1370 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1371 return NOTIFY_BAD;
1372 }
1373
1374 INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
1375 neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1376 neigh_work->n = n;
1377
1378 /* Take a reference to ensure the neighbour won't be
1379 * destructed until we drop the reference in delayed
1380 * work.
1381 */
1382 neigh_clone(n);
1383 mlxsw_core_schedule_work(&neigh_work->work);
1384 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1385 break;
1386 }
1387
1388 return NOTIFY_DONE;
1389}
1390
1391static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1392{
1393 int err;
1394
1395 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
1396 &mlxsw_sp_neigh_ht_params);
1397 if (err)
1398 return err;
1399
1400 /* Initialize the polling interval according to the default
1401 * table.
1402 */
1403 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1404
0b2361d9 1405 /* Create the delayed works for the activity_update */
1406 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
1407 mlxsw_sp_router_neighs_update_work);
1408 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
1409 mlxsw_sp_router_probe_unresolved_nexthops);
1410 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
1411 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
1412 return 0;
1413}
1414
1415static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1416{
1417 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
1418 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
1419 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
1420}
1421
1422static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
1423 const struct mlxsw_sp_rif *rif)
1424{
1425 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1426
1427 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
1428 rif->rif_index, rif->addr);
1429 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1430}
1431
1432static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
1433 struct mlxsw_sp_rif *rif)
1434{
1435 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
1436
1437 mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
1438 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
1439 rif_list_node)
1440 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1441}
1442
1443struct mlxsw_sp_nexthop_key {
1444 struct fib_nh *fib_nh;
1445};
1446
1447struct mlxsw_sp_nexthop {
1448 struct list_head neigh_list_node; /* member of neigh entry list */
1449 struct list_head rif_list_node;
1450 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1451 * this belongs to
1452 */
1453 struct rhash_head ht_node;
1454 struct mlxsw_sp_nexthop_key key;
1455 unsigned char gw_addr[sizeof(struct in6_addr)];
1456 struct mlxsw_sp_rif *rif;
1457 u8 should_offload:1, /* set indicates this neigh is connected and
1458 * should be put to KVD linear area of this group.
1459 */
1460 offloaded:1, /* set in case the neigh is actually put into
1461 * KVD linear area of this group.
1462 */
1463 update:1; /* set indicates that MAC of this neigh should be
1464 * updated in HW
1465 */
1466 struct mlxsw_sp_neigh_entry *neigh_entry;
1467};
1468
1469struct mlxsw_sp_nexthop_group_key {
1470 struct fib_info *fi;
1471};
1472
1473struct mlxsw_sp_nexthop_group {
1474 struct rhash_head ht_node;
1475 struct list_head fib_list; /* list of fib entries that use this group */
1476 struct neigh_table *neigh_tbl;
1477 struct mlxsw_sp_nexthop_group_key key;
1478 u8 adj_index_valid:1,
1479 gateway:1; /* routes using the group use a gateway */
1480 u32 adj_index;
1481 u16 ecmp_size;
1482 u16 count;
1483 struct mlxsw_sp_nexthop nexthops[0];
1484#define nh_rif nexthops[0].rif
1485};
1486
1487static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
1488 .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
1489 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
1490 .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
1491};
1492
1493static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
1494 struct mlxsw_sp_nexthop_group *nh_grp)
1495{
1496 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
1497 &nh_grp->ht_node,
1498 mlxsw_sp_nexthop_group_ht_params);
1499}
1500
1501static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
1502 struct mlxsw_sp_nexthop_group *nh_grp)
1503{
1504 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
1505 &nh_grp->ht_node,
1506 mlxsw_sp_nexthop_group_ht_params);
1507}
1508
1509static struct mlxsw_sp_nexthop_group *
1510mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
1511 struct mlxsw_sp_nexthop_group_key key)
1512{
1513 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
1514 mlxsw_sp_nexthop_group_ht_params);
1515}
1516
1517static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
1518 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
1519 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
1520 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
1521};
1522
1523static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
1524 struct mlxsw_sp_nexthop *nh)
1525{
1526 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
1527 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
1528}
1529
1530static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
1531 struct mlxsw_sp_nexthop *nh)
1532{
1533 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
1534 mlxsw_sp_nexthop_ht_params);
1535}
1536
1537static struct mlxsw_sp_nexthop *
1538mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1539 struct mlxsw_sp_nexthop_key key)
1540{
1541 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
1542 mlxsw_sp_nexthop_ht_params);
1543}
1544
1545static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
1546 const struct mlxsw_sp_fib *fib,
1547 u32 adj_index, u16 ecmp_size,
1548 u32 new_adj_index,
1549 u16 new_ecmp_size)
1550{
1551 char raleu_pl[MLXSW_REG_RALEU_LEN];
1552
1553 mlxsw_reg_raleu_pack(raleu_pl,
1554 (enum mlxsw_reg_ralxx_protocol) fib->proto,
1555 fib->vr->id, adj_index, ecmp_size, new_adj_index,
1556 new_ecmp_size);
1557 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1558}
1559
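/* After an ECMP group moves to a new adjacency index or size, re-point
 * every route using it: walk the group's FIB entries and issue one RALEU
 * update per distinct FIB (virtual router and protocol) affected.
 */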
1560static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1561 struct mlxsw_sp_nexthop_group *nh_grp,
1562 u32 old_adj_index, u16 old_ecmp_size)
1563{
1564 struct mlxsw_sp_fib_entry *fib_entry;
1565 struct mlxsw_sp_fib *fib = NULL;
1566 int err;
1567
1568 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1569 if (fib == fib_entry->fib_node->fib)
1570 continue;
1571 fib = fib_entry->fib_node->fib;
1572 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
1573 old_adj_index,
1574 old_ecmp_size,
1575 nh_grp->adj_index,
1576 nh_grp->ecmp_size);
1577 if (err)
1578 return err;
1579 }
1580 return 0;
1581}
1582
1583static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1584 struct mlxsw_sp_nexthop *nh)
1585{
1586 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1587 char ratr_pl[MLXSW_REG_RATR_LEN];
1588
1589 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1590 true, adj_index, neigh_entry->rif);
1591 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1592 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1593}
1594
1595static int
1596mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1597 struct mlxsw_sp_nexthop_group *nh_grp,
1598 bool reallocate)
1599{
1600 u32 adj_index = nh_grp->adj_index; /* base */
1601 struct mlxsw_sp_nexthop *nh;
1602 int i;
1603 int err;
1604
1605 for (i = 0; i < nh_grp->count; i++) {
1606 nh = &nh_grp->nexthops[i];
1607
1608 if (!nh->should_offload) {
1609 nh->offloaded = 0;
1610 continue;
1611 }
1612
1613 if (nh->update || reallocate) {
1614 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1615 adj_index, nh);
1616 if (err)
1617 return err;
1618 nh->update = 0;
1619 nh->offloaded = 1;
1620 }
1621 adj_index++;
1622 }
1623 return 0;
1624}
1625
1626static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1627 struct mlxsw_sp_fib_entry *fib_entry);
1628
1629static int
1630mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1631 struct mlxsw_sp_nexthop_group *nh_grp)
1632{
1633 struct mlxsw_sp_fib_entry *fib_entry;
1634 int err;
1635
1636 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1637 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1638 if (err)
1639 return err;
1640 }
1641 return 0;
1642}
1643
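/* Recompute the adjacency entries of a nexthop group: count the nexthops
 * that can be offloaded, allocate a KVD linear block of that size, write
 * the neighbour MACs into it, switch the group's FIB entries over to the
 * new block and release the old one. On any failure, fall back to
 * trapping the group's traffic to the kernel.
 */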
1644static void
1645mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1646 struct mlxsw_sp_nexthop_group *nh_grp)
1647{
1648 struct mlxsw_sp_nexthop *nh;
1649 bool offload_change = false;
1650 u32 adj_index;
1651 u16 ecmp_size = 0;
1652 bool old_adj_index_valid;
1653 u32 old_adj_index;
1654 u16 old_ecmp_size;
1655 int i;
1656 int err;
1657
1658 if (!nh_grp->gateway) {
1659 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1660 return;
1661 }
1662
1663 for (i = 0; i < nh_grp->count; i++) {
1664 nh = &nh_grp->nexthops[i];
1665
1666 if (nh->should_offload ^ nh->offloaded) {
1667 offload_change = true;
1668 if (nh->should_offload)
1669 nh->update = 1;
1670 }
1671 if (nh->should_offload)
1672 ecmp_size++;
1673 }
1674 if (!offload_change) {
1675 /* Nothing was added or removed, so no need to reallocate. Just
1676 * update MAC on existing adjacency indexes.
1677 */
1678 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1679 false);
1680 if (err) {
1681 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1682 goto set_trap;
1683 }
1684 return;
1685 }
1686 if (!ecmp_size)
1687 /* No neigh of this group is connected so we just set
1688 * the trap and let everything flow through kernel.
1689 */
1690 goto set_trap;
1691
1692 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
1693 if (err) {
1694 /* We ran out of KVD linear space, just set the
1695 * trap and let everything flow through kernel.
1696 */
1697 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1698 goto set_trap;
1699 }
1700 old_adj_index_valid = nh_grp->adj_index_valid;
1701 old_adj_index = nh_grp->adj_index;
1702 old_ecmp_size = nh_grp->ecmp_size;
1703 nh_grp->adj_index_valid = 1;
1704 nh_grp->adj_index = adj_index;
1705 nh_grp->ecmp_size = ecmp_size;
1706 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
1707 if (err) {
1708 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1709 goto set_trap;
1710 }
1711
1712 if (!old_adj_index_valid) {
1713 /* The trap was set for fib entries, so we have to call
1714 * fib entry update to unset it and use adjacency index.
1715 */
1716 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1717 if (err) {
1718 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1719 goto set_trap;
1720 }
1721 return;
1722 }
1723
1724 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1725 old_adj_index, old_ecmp_size);
1726 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1727 if (err) {
1728 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1729 goto set_trap;
1730 }
1731 return;
1732
1733set_trap:
1734 old_adj_index_valid = nh_grp->adj_index_valid;
1735 nh_grp->adj_index_valid = 0;
1736 for (i = 0; i < nh_grp->count; i++) {
1737 nh = &nh_grp->nexthops[i];
1738 nh->offloaded = 0;
1739 }
1740 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1741 if (err)
1742 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1743 if (old_adj_index_valid)
1744 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1745}
1746
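/* Mark a nexthop as (un)offloadable based on its neighbour state and flag
 * it for update; the hardware write itself happens when the owning group
 * is refreshed.
 */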
1747static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1748 bool removing)
1749{
1750 if (!removing && !nh->should_offload)
1751 nh->should_offload = 1;
1752 else if (removing && nh->offloaded)
1753 nh->should_offload = 0;
1754 nh->update = 1;
1755}
1756
1757static void
1758mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1759 struct mlxsw_sp_neigh_entry *neigh_entry,
1760 bool removing)
1761{
1762 struct mlxsw_sp_nexthop *nh;
1763
a7ff87ac
JP
1764 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1765 neigh_list_node) {
1766 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1767 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1768 }
a7ff87ac
JP
1769}
1770
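/* Tie a nexthop to the RIF its packets egress through, so the nexthop can
 * be torn down when the RIF goes away.
 */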
9665b745 1771static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
bf95233e 1772 struct mlxsw_sp_rif *rif)
9665b745 1773{
bf95233e 1774 if (nh->rif)
9665b745
IS
1775 return;
1776
bf95233e
AS
1777 nh->rif = rif;
1778 list_add(&nh->rif_list_node, &rif->nexthop_list);
9665b745
IS
1779}
1780
1781static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1782{
bf95233e 1783 if (!nh->rif)
9665b745
IS
1784 return;
1785
1786 list_del(&nh->rif_list_node);
bf95233e 1787 nh->rif = NULL;
9665b745
IS
1788}
1789
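/* Resolve (or create) the neighbour used by a gatewayed nexthop, link the
 * nexthop to its neigh entry and derive the initial offload state from
 * the current NUD state.
 */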
a8c97014
IS
1790static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
1791 struct mlxsw_sp_nexthop *nh)
a7ff87ac
JP
1792{
1793 struct mlxsw_sp_neigh_entry *neigh_entry;
a7ff87ac 1794 struct neighbour *n;
93a87e5e 1795 u8 nud_state, dead;
c53b8e1b
IS
1796 int err;
1797
ad178c8e 1798 if (!nh->nh_grp->gateway || nh->neigh_entry)
b8399a1e
IS
1799 return 0;
1800
33b1341c
JP
1801 /* Take a reference on the neighbour here, ensuring that it is
1802 * not destroyed before the nexthop entry is finished.
1803 * The reference is taken either in neigh_lookup() or
fd76d910 1804 * in neigh_create() in case n is not found.
33b1341c 1805 */
58adf2c4 1806 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
33b1341c 1807 if (!n) {
58adf2c4
IS
1808 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
1809 nh->rif->dev);
a8c97014
IS
1810 if (IS_ERR(n))
1811 return PTR_ERR(n);
a7ff87ac 1812 neigh_event_send(n, NULL);
33b1341c
JP
1813 }
1814 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1815 if (!neigh_entry) {
5c8802f1
IS
1816 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1817 if (IS_ERR(neigh_entry)) {
c53b8e1b
IS
1818 err = -EINVAL;
1819 goto err_neigh_entry_create;
5c8802f1 1820 }
a7ff87ac 1821 }
b2157149
YG
1822
1823 /* If this is the first nexthop connected to that neigh, add it to
1824 * nexthop_neighs_list
1825 */
1826 if (list_empty(&neigh_entry->nexthop_list))
1827 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
9011b677 1828 &mlxsw_sp->router->nexthop_neighs_list);
b2157149 1829
a7ff87ac
JP
1830 nh->neigh_entry = neigh_entry;
1831 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1832 read_lock_bh(&n->lock);
1833 nud_state = n->nud_state;
93a87e5e 1834 dead = n->dead;
a7ff87ac 1835 read_unlock_bh(&n->lock);
93a87e5e 1836 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
a7ff87ac
JP
1837
1838 return 0;
c53b8e1b
IS
1839
1840err_neigh_entry_create:
1841 neigh_release(n);
c53b8e1b 1842 return err;
a7ff87ac
JP
1843}
1844
a8c97014
IS
1845static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
1846 struct mlxsw_sp_nexthop *nh)
a7ff87ac
JP
1847{
1848 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
a8c97014 1849 struct neighbour *n;
a7ff87ac 1850
b8399a1e 1851 if (!neigh_entry)
a8c97014
IS
1852 return;
1853 n = neigh_entry->key.n;
b8399a1e 1854
58312125 1855 __mlxsw_sp_nexthop_neigh_update(nh, true);
a7ff87ac 1856 list_del(&nh->neigh_list_node);
e58be79e 1857 nh->neigh_entry = NULL;
b2157149
YG
1858
1859 /* If this is the last nexthop connected to that neigh, remove it
1860 * from nexthop_neighs_list
1861 */
e58be79e
IS
1862 if (list_empty(&neigh_entry->nexthop_list))
1863 list_del(&neigh_entry->nexthop_neighs_list_node);
b2157149 1864
5c8802f1
IS
1865 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1866 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1867
1868 neigh_release(n);
a8c97014 1869}
c53b8e1b 1870
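/* Initialize a nexthop from its kernel fib_nh: record the gateway address,
 * insert it into the nexthop hash table and, if a RIF exists for the
 * egress netdev, resolve its neighbour as well.
 */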
a8c97014
IS
1871static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1872 struct mlxsw_sp_nexthop_group *nh_grp,
1873 struct mlxsw_sp_nexthop *nh,
1874 struct fib_nh *fib_nh)
1875{
1876 struct net_device *dev = fib_nh->nh_dev;
df6dd79b 1877 struct in_device *in_dev;
bf95233e 1878 struct mlxsw_sp_rif *rif;
a8c97014
IS
1879 int err;
1880
1881 nh->nh_grp = nh_grp;
1882 nh->key.fib_nh = fib_nh;
58adf2c4 1883 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
a8c97014
IS
1884 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1885 if (err)
1886 return err;
1887
97989ee0
IS
1888 if (!dev)
1889 return 0;
1890
df6dd79b
IS
1891 in_dev = __in_dev_get_rtnl(dev);
1892 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1893 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1894 return 0;
1895
bf95233e
AS
1896 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1897 if (!rif)
a8c97014 1898 return 0;
bf95233e 1899 mlxsw_sp_nexthop_rif_init(nh, rif);
a8c97014
IS
1900
1901 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1902 if (err)
1903 goto err_nexthop_neigh_init;
1904
1905 return 0;
1906
1907err_nexthop_neigh_init:
a4e75b76 1908 mlxsw_sp_nexthop_rif_fini(nh);
a8c97014
IS
1909 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1910 return err;
1911}
1912
1913static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1914 struct mlxsw_sp_nexthop *nh)
1915{
1916 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
9665b745 1917 mlxsw_sp_nexthop_rif_fini(nh);
c53b8e1b 1918 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
a7ff87ac
JP
1919}
1920
ad178c8e
IS
1921static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1922 unsigned long event, struct fib_nh *fib_nh)
1923{
1924 struct mlxsw_sp_nexthop_key key;
1925 struct mlxsw_sp_nexthop *nh;
bf95233e 1926 struct mlxsw_sp_rif *rif;
ad178c8e 1927
9011b677 1928 if (mlxsw_sp->router->aborted)
ad178c8e
IS
1929 return;
1930
1931 key.fib_nh = fib_nh;
1932 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
1933 if (WARN_ON_ONCE(!nh))
1934 return;
1935
bf95233e
AS
1936 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
1937 if (!rif)
ad178c8e
IS
1938 return;
1939
1940 switch (event) {
1941 case FIB_EVENT_NH_ADD:
bf95233e 1942 mlxsw_sp_nexthop_rif_init(nh, rif);
ad178c8e
IS
1943 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1944 break;
1945 case FIB_EVENT_NH_DEL:
1946 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
9665b745 1947 mlxsw_sp_nexthop_rif_fini(nh);
ad178c8e
IS
1948 break;
1949 }
1950
1951 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1952}
1953
9665b745 1954static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
bf95233e 1955 struct mlxsw_sp_rif *rif)
9665b745
IS
1956{
1957 struct mlxsw_sp_nexthop *nh, *tmp;
1958
bf95233e 1959 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
9665b745
IS
1960 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1961 mlxsw_sp_nexthop_rif_fini(nh);
1962 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1963 }
1964}
1965
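/* Create a nexthop group mirroring the kernel's fib_info: one nexthop per
 * fib_nh, keyed by the fib_info itself, and refresh it so the adjacency
 * entries are programmed.
 */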
a7ff87ac
JP
1966static struct mlxsw_sp_nexthop_group *
1967mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1968{
1969 struct mlxsw_sp_nexthop_group *nh_grp;
1970 struct mlxsw_sp_nexthop *nh;
1971 struct fib_nh *fib_nh;
1972 size_t alloc_size;
1973 int i;
1974 int err;
1975
1976 alloc_size = sizeof(*nh_grp) +
1977 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1978 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1979 if (!nh_grp)
1980 return ERR_PTR(-ENOMEM);
1981 INIT_LIST_HEAD(&nh_grp->fib_list);
58adf2c4
IS
1982 nh_grp->neigh_tbl = &arp_tbl;
1983
b3e8d1eb 1984 nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
a7ff87ac 1985 nh_grp->count = fi->fib_nhs;
e9ad5e7d 1986 nh_grp->key.fi = fi;
7387dbbc 1987 fib_info_hold(fi);
a7ff87ac
JP
1988 for (i = 0; i < nh_grp->count; i++) {
1989 nh = &nh_grp->nexthops[i];
1990 fib_nh = &fi->fib_nh[i];
1991 err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1992 if (err)
1993 goto err_nexthop_init;
1994 }
e9ad5e7d
IS
1995 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
1996 if (err)
1997 goto err_nexthop_group_insert;
a7ff87ac
JP
1998 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1999 return nh_grp;
2000
e9ad5e7d 2001err_nexthop_group_insert:
a7ff87ac 2002err_nexthop_init:
df6dd79b
IS
2003 for (i--; i >= 0; i--) {
2004 nh = &nh_grp->nexthops[i];
a7ff87ac 2005 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
df6dd79b 2006 }
7387dbbc 2007 fib_info_put(nh_grp->key.fi);
a7ff87ac
JP
2008 kfree(nh_grp);
2009 return ERR_PTR(err);
2010}
2011
2012static void
2013mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
2014 struct mlxsw_sp_nexthop_group *nh_grp)
2015{
2016 struct mlxsw_sp_nexthop *nh;
2017 int i;
2018
e9ad5e7d 2019 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
a7ff87ac
JP
2020 for (i = 0; i < nh_grp->count; i++) {
2021 nh = &nh_grp->nexthops[i];
2022 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
2023 }
58312125
IS
2024 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
2025 WARN_ON_ONCE(nh_grp->adj_index_valid);
7387dbbc 2026 fib_info_put(nh_grp->key.fi);
a7ff87ac
JP
2027 kfree(nh_grp);
2028}
2029
a7ff87ac
JP
2030static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
2031 struct mlxsw_sp_fib_entry *fib_entry,
2032 struct fib_info *fi)
2033{
e9ad5e7d 2034 struct mlxsw_sp_nexthop_group_key key;
a7ff87ac
JP
2035 struct mlxsw_sp_nexthop_group *nh_grp;
2036
e9ad5e7d
IS
2037 key.fi = fi;
2038 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
a7ff87ac
JP
2039 if (!nh_grp) {
2040 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
2041 if (IS_ERR(nh_grp))
2042 return PTR_ERR(nh_grp);
2043 }
2044 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
2045 fib_entry->nh_group = nh_grp;
2046 return 0;
2047}
2048
2049static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
2050 struct mlxsw_sp_fib_entry *fib_entry)
2051{
2052 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
2053
2054 list_del(&fib_entry->nexthop_group_node);
2055 if (!list_empty(&nh_grp->fib_list))
2056 return;
2057 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
2058}
2059
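/* A FIB entry is considered offloadable only for the default TOS: remote
 * entries need a valid adjacency index, local entries need a RIF.
 */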
013b20f9
IS
2060static bool
2061mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
2062{
2063 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
2064
9aecce1c
IS
2065 if (fib_entry->params.tos)
2066 return false;
2067
013b20f9
IS
2068 switch (fib_entry->type) {
2069 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
2070 return !!nh_group->adj_index_valid;
2071 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
70ad3506 2072 return !!nh_group->nh_rif;
013b20f9
IS
2073 default:
2074 return false;
2075 }
2076}
2077
2078static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
2079{
2080 fib_entry->offloaded = true;
2081
76610ebb 2082 switch (fib_entry->fib_node->fib->proto) {
013b20f9
IS
2083 case MLXSW_SP_L3_PROTO_IPV4:
2084 fib_info_offload_inc(fib_entry->nh_group->key.fi);
2085 break;
2086 case MLXSW_SP_L3_PROTO_IPV6:
2087 WARN_ON_ONCE(1);
2088 }
2089}
2090
2091static void
2092mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
2093{
76610ebb 2094 switch (fib_entry->fib_node->fib->proto) {
013b20f9
IS
2095 case MLXSW_SP_L3_PROTO_IPV4:
2096 fib_info_offload_dec(fib_entry->nh_group->key.fi);
2097 break;
2098 case MLXSW_SP_L3_PROTO_IPV6:
2099 WARN_ON_ONCE(1);
2100 }
2101
2102 fib_entry->offloaded = false;
2103}
2104
2105static void
2106mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2107 enum mlxsw_reg_ralue_op op, int err)
2108{
2109 switch (op) {
2110 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
2111 if (!fib_entry->offloaded)
2112 return;
2113 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
2114 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
2115 if (err)
2116 return;
2117 if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
2118 !fib_entry->offloaded)
2119 mlxsw_sp_fib_entry_offload_set(fib_entry);
2120 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
2121 fib_entry->offloaded)
2122 mlxsw_sp_fib_entry_offload_unset(fib_entry);
2123 return;
2124 default:
2125 return;
2126 }
2127}
2128
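/* Pack the common part of the RALUE register: protocol, operation, virtual
 * router and destination prefix, for both IPv4 and IPv6.
 */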
9dbf4d76
IS
2129static void
2130mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
2131 const struct mlxsw_sp_fib_entry *fib_entry,
2132 enum mlxsw_reg_ralue_op op)
a7ff87ac 2133{
76610ebb 2134 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
9dbf4d76
IS
2135 enum mlxsw_reg_ralxx_protocol proto;
2136 u32 *p_dip;
2137
2138 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
2139
2140 switch (fib->proto) {
2141 case MLXSW_SP_L3_PROTO_IPV4:
2142 p_dip = (u32 *) fib_entry->fib_node->key.addr;
2143 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
2144 fib_entry->fib_node->key.prefix_len,
2145 *p_dip);
2146 break;
2147 case MLXSW_SP_L3_PROTO_IPV6:
2148 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
2149 fib_entry->fib_node->key.prefix_len,
2150 fib_entry->fib_node->key.addr);
2151 break;
2152 }
2153}
2154
2155static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
2156 struct mlxsw_sp_fib_entry *fib_entry,
2157 enum mlxsw_reg_ralue_op op)
2158{
2159 char ralue_pl[MLXSW_REG_RALUE_LEN];
a7ff87ac
JP
2160 enum mlxsw_reg_ralue_trap_action trap_action;
2161 u16 trap_id = 0;
2162 u32 adjacency_index = 0;
2163 u16 ecmp_size = 0;
2164
2165 /* In case the nexthop group adjacency index is valid, use it
2166 * with the provided ECMP size. Otherwise, set up a trap and pass
2167 * traffic to the kernel.
2168 */
4b411477 2169 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
a7ff87ac
JP
2170 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
2171 adjacency_index = fib_entry->nh_group->adj_index;
2172 ecmp_size = fib_entry->nh_group->ecmp_size;
2173 } else {
2174 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2175 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2176 }
2177
9dbf4d76 2178 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
a7ff87ac
JP
2179 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
2180 adjacency_index, ecmp_size);
2181 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2182}
2183
9dbf4d76
IS
2184static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
2185 struct mlxsw_sp_fib_entry *fib_entry,
2186 enum mlxsw_reg_ralue_op op)
61c503f9 2187{
bf95233e 2188 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
70ad3506 2189 enum mlxsw_reg_ralue_trap_action trap_action;
61c503f9 2190 char ralue_pl[MLXSW_REG_RALUE_LEN];
70ad3506 2191 u16 trap_id = 0;
bf95233e 2192 u16 rif_index = 0;
70ad3506
IS
2193
2194 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
2195 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
bf95233e 2196 rif_index = rif->rif_index;
70ad3506
IS
2197 } else {
2198 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2199 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2200 }
61c503f9 2201
9dbf4d76 2202 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
bf95233e
AS
2203 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
2204 rif_index);
61c503f9
JP
2205 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2206}
2207
9dbf4d76
IS
2208static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
2209 struct mlxsw_sp_fib_entry *fib_entry,
2210 enum mlxsw_reg_ralue_op op)
61c503f9
JP
2211{
2212 char ralue_pl[MLXSW_REG_RALUE_LEN];
61c503f9 2213
9dbf4d76 2214 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
61c503f9
JP
2215 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2216 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2217}
2218
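/* Dispatch a FIB entry write/delete to the routine matching its type:
 * remote (adjacency), local (RIF) or trap (IP2ME).
 */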
9dbf4d76
IS
2219static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2220 struct mlxsw_sp_fib_entry *fib_entry,
2221 enum mlxsw_reg_ralue_op op)
61c503f9
JP
2222{
2223 switch (fib_entry->type) {
2224 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
9dbf4d76 2225 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
61c503f9 2226 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
9dbf4d76 2227 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
61c503f9 2228 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
9dbf4d76 2229 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
61c503f9
JP
2230 }
2231 return -EINVAL;
2232}
2233
2234static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2235 struct mlxsw_sp_fib_entry *fib_entry,
2236 enum mlxsw_reg_ralue_op op)
2237{
9dbf4d76 2238 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
013b20f9 2239
013b20f9 2240 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
9dbf4d76 2241
013b20f9 2242 return err;
61c503f9
JP
2243}
2244
2245static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
2246 struct mlxsw_sp_fib_entry *fib_entry)
2247{
7146da31
JP
2248 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2249 MLXSW_REG_RALUE_OP_WRITE_WRITE);
61c503f9
JP
2250}
2251
2252static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
2253 struct mlxsw_sp_fib_entry *fib_entry)
2254{
2255 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2256 MLXSW_REG_RALUE_OP_WRITE_DELETE);
2257}
2258
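/* Derive the entry type from the kernel route type: broadcast/local routes
 * are trapped to the CPU, blackhole-like routes use a local action, and
 * unicast routes are remote only when the nexthop is a gateway.
 */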
61c503f9 2259static int
013b20f9
IS
2260mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
2261 const struct fib_entry_notifier_info *fen_info,
2262 struct mlxsw_sp_fib_entry *fib_entry)
61c503f9 2263{
b45f64d1 2264 struct fib_info *fi = fen_info->fi;
61c503f9 2265
97989ee0
IS
2266 switch (fen_info->type) {
2267 case RTN_BROADCAST: /* fall through */
2268 case RTN_LOCAL:
61c503f9
JP
2269 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2270 return 0;
97989ee0
IS
2271 case RTN_UNREACHABLE: /* fall through */
2272 case RTN_BLACKHOLE: /* fall through */
2273 case RTN_PROHIBIT:
2274 /* Packets hitting these routes need to be trapped, but
2275 * can do so with a lower priority than packets directed
2276 * at the host, so use action type local instead of trap.
2277 */
61c503f9 2278 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
97989ee0
IS
2279 return 0;
2280 case RTN_UNICAST:
2281 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
2282 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2283 else
2284 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
2285 return 0;
2286 default:
2287 return -EINVAL;
2288 }
a7ff87ac
JP
2289}
2290
5b004412 2291static struct mlxsw_sp_fib_entry *
9aecce1c
IS
2292mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
2293 struct mlxsw_sp_fib_node *fib_node,
2294 const struct fib_entry_notifier_info *fen_info)
61c503f9 2295{
61c503f9 2296 struct mlxsw_sp_fib_entry *fib_entry;
61c503f9
JP
2297 int err;
2298
9aecce1c 2299 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
61c503f9
JP
2300 if (!fib_entry) {
2301 err = -ENOMEM;
9aecce1c 2302 goto err_fib_entry_alloc;
61c503f9 2303 }
61c503f9 2304
013b20f9 2305 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
61c503f9 2306 if (err)
013b20f9 2307 goto err_fib4_entry_type_set;
61c503f9 2308
9aecce1c 2309 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
b8399a1e
IS
2310 if (err)
2311 goto err_nexthop_group_get;
2312
9aecce1c
IS
2313 fib_entry->params.prio = fen_info->fi->fib_priority;
2314 fib_entry->params.tb_id = fen_info->tb_id;
2315 fib_entry->params.type = fen_info->type;
2316 fib_entry->params.tos = fen_info->tos;
2317
2318 fib_entry->fib_node = fib_node;
2319
5b004412
JP
2320 return fib_entry;
2321
b8399a1e 2322err_nexthop_group_get:
013b20f9 2323err_fib4_entry_type_set:
9aecce1c
IS
2324 kfree(fib_entry);
2325err_fib_entry_alloc:
5b004412
JP
2326 return ERR_PTR(err);
2327}
2328
9aecce1c
IS
2329static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2330 struct mlxsw_sp_fib_entry *fib_entry)
2331{
2332 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
2333 kfree(fib_entry);
2334}
2335
2336static struct mlxsw_sp_fib_node *
160e22aa
IS
2337mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2338 size_t addr_len, unsigned char prefix_len);
9aecce1c 2339
5b004412 2340static struct mlxsw_sp_fib_entry *
9aecce1c
IS
2341mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2342 const struct fib_entry_notifier_info *fen_info)
5b004412 2343{
9aecce1c
IS
2344 struct mlxsw_sp_fib_entry *fib_entry;
2345 struct mlxsw_sp_fib_node *fib_node;
160e22aa
IS
2346 struct mlxsw_sp_fib *fib;
2347 struct mlxsw_sp_vr *vr;
2348
2349 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
2350 if (!vr)
2351 return NULL;
2352 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
5b004412 2353
160e22aa
IS
2354 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
2355 sizeof(fen_info->dst),
2356 fen_info->dst_len);
2357 if (!fib_node)
9aecce1c
IS
2358 return NULL;
2359
2360 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2361 if (fib_entry->params.tb_id == fen_info->tb_id &&
2362 fib_entry->params.tos == fen_info->tos &&
2363 fib_entry->params.type == fen_info->type &&
2364 fib_entry->nh_group->key.fi == fen_info->fi) {
2365 return fib_entry;
2366 }
2367 }
2368
2369 return NULL;
2370}
2371
2372static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
2373 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
2374 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
2375 .key_len = sizeof(struct mlxsw_sp_fib_key),
2376 .automatic_shrinking = true,
2377};
2378
2379static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
2380 struct mlxsw_sp_fib_node *fib_node)
2381{
2382 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
2383 mlxsw_sp_fib_ht_params);
2384}
2385
2386static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
2387 struct mlxsw_sp_fib_node *fib_node)
2388{
2389 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
2390 mlxsw_sp_fib_ht_params);
2391}
2392
2393static struct mlxsw_sp_fib_node *
2394mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2395 size_t addr_len, unsigned char prefix_len)
2396{
2397 struct mlxsw_sp_fib_key key;
2398
2399 memset(&key, 0, sizeof(key));
2400 memcpy(key.addr, addr, addr_len);
2401 key.prefix_len = prefix_len;
2402 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2403}
2404
2405static struct mlxsw_sp_fib_node *
76610ebb 2406mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
9aecce1c
IS
2407 size_t addr_len, unsigned char prefix_len)
2408{
2409 struct mlxsw_sp_fib_node *fib_node;
2410
2411 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2412 if (!fib_node)
5b004412
JP
2413 return NULL;
2414
9aecce1c 2415 INIT_LIST_HEAD(&fib_node->entry_list);
76610ebb 2416 list_add(&fib_node->list, &fib->node_list);
9aecce1c
IS
2417 memcpy(fib_node->key.addr, addr, addr_len);
2418 fib_node->key.prefix_len = prefix_len;
9aecce1c
IS
2419
2420 return fib_node;
2421}
2422
2423static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2424{
9aecce1c
IS
2425 list_del(&fib_node->list);
2426 WARN_ON(!list_empty(&fib_node->entry_list));
2427 kfree(fib_node);
2428}
2429
2430static bool
2431mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2432 const struct mlxsw_sp_fib_entry *fib_entry)
2433{
2434 return list_first_entry(&fib_node->entry_list,
2435 struct mlxsw_sp_fib_entry, list) == fib_entry;
2436}
2437
2438static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2439{
2440 unsigned char prefix_len = fib_node->key.prefix_len;
76610ebb 2441 struct mlxsw_sp_fib *fib = fib_node->fib;
9aecce1c
IS
2442
2443 if (fib->prefix_ref_count[prefix_len]++ == 0)
2444 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2445}
2446
2447static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2448{
2449 unsigned char prefix_len = fib_node->key.prefix_len;
76610ebb 2450 struct mlxsw_sp_fib *fib = fib_node->fib;
9aecce1c
IS
2451
2452 if (--fib->prefix_ref_count[prefix_len] == 0)
2453 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
5b004412
JP
2454}
2455
76610ebb
IS
2456static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2457 struct mlxsw_sp_fib_node *fib_node,
2458 struct mlxsw_sp_fib *fib)
2459{
2460 struct mlxsw_sp_prefix_usage req_prefix_usage;
2461 struct mlxsw_sp_lpm_tree *lpm_tree;
2462 int err;
2463
2464 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2465 if (err)
2466 return err;
2467 fib_node->fib = fib;
2468
2469 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2470 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2471
2472 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2473 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2474 &req_prefix_usage);
2475 if (err)
2476 goto err_tree_check;
2477 } else {
2478 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2479 fib->proto);
2480 if (IS_ERR(lpm_tree))
2481 return PTR_ERR(lpm_tree);
2482 fib->lpm_tree = lpm_tree;
2483 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2484 if (err)
2485 goto err_tree_bind;
2486 }
2487
2488 mlxsw_sp_fib_node_prefix_inc(fib_node);
2489
2490 return 0;
2491
2492err_tree_bind:
2493 fib->lpm_tree = NULL;
2494 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2495err_tree_check:
2496 fib_node->fib = NULL;
2497 mlxsw_sp_fib_node_remove(fib, fib_node);
2498 return err;
2499}
2500
2501static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
2502 struct mlxsw_sp_fib_node *fib_node)
2503{
2504 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
2505 struct mlxsw_sp_fib *fib = fib_node->fib;
2506
2507 mlxsw_sp_fib_node_prefix_dec(fib_node);
2508
2509 if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2510 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
2511 fib->lpm_tree = NULL;
2512 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2513 } else {
2514 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
2515 }
2516
2517 fib_node->fib = NULL;
2518 mlxsw_sp_fib_node_remove(fib, fib_node);
2519}
2520
9aecce1c 2521static struct mlxsw_sp_fib_node *
731ea1ca
IS
2522mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
2523 size_t addr_len, unsigned char prefix_len,
2524 enum mlxsw_sp_l3proto proto)
5b004412 2525{
9aecce1c 2526 struct mlxsw_sp_fib_node *fib_node;
76610ebb 2527 struct mlxsw_sp_fib *fib;
9aecce1c
IS
2528 struct mlxsw_sp_vr *vr;
2529 int err;
2530
731ea1ca 2531 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id);
9aecce1c
IS
2532 if (IS_ERR(vr))
2533 return ERR_CAST(vr);
731ea1ca 2534 fib = mlxsw_sp_vr_fib(vr, proto);
9aecce1c 2535
731ea1ca 2536 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
9aecce1c
IS
2537 if (fib_node)
2538 return fib_node;
5b004412 2539
731ea1ca 2540 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
9aecce1c
IS
2541 if (!fib_node) {
2542 err = -ENOMEM;
2543 goto err_fib_node_create;
5b004412 2544 }
9aecce1c 2545
76610ebb
IS
2546 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
2547 if (err)
2548 goto err_fib_node_init;
2549
9aecce1c
IS
2550 return fib_node;
2551
76610ebb
IS
2552err_fib_node_init:
2553 mlxsw_sp_fib_node_destroy(fib_node);
9aecce1c 2554err_fib_node_create:
76610ebb 2555 mlxsw_sp_vr_put(vr);
9aecce1c 2556 return ERR_PTR(err);
5b004412
JP
2557}
2558
731ea1ca
IS
2559static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
2560 struct mlxsw_sp_fib_node *fib_node)
5b004412 2561{
76610ebb 2562 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
5b004412 2563
9aecce1c
IS
2564 if (!list_empty(&fib_node->entry_list))
2565 return;
76610ebb 2566 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
9aecce1c 2567 mlxsw_sp_fib_node_destroy(fib_node);
76610ebb 2568 mlxsw_sp_vr_put(vr);
61c503f9
JP
2569}
2570
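/* Entries within a FIB node are kept ordered by table ID, TOS and
 * priority; find the entry before which a new one should be inserted.
 */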
9aecce1c
IS
2571static struct mlxsw_sp_fib_entry *
2572mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
2573 const struct mlxsw_sp_fib_entry_params *params)
61c503f9 2574{
61c503f9 2575 struct mlxsw_sp_fib_entry *fib_entry;
9aecce1c
IS
2576
2577 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2578 if (fib_entry->params.tb_id > params->tb_id)
2579 continue;
2580 if (fib_entry->params.tb_id != params->tb_id)
2581 break;
2582 if (fib_entry->params.tos > params->tos)
2583 continue;
2584 if (fib_entry->params.prio >= params->prio ||
2585 fib_entry->params.tos < params->tos)
2586 return fib_entry;
2587 }
2588
2589 return NULL;
2590}
2591
4283bce5
IS
2592static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
2593 struct mlxsw_sp_fib_entry *new_entry)
2594{
2595 struct mlxsw_sp_fib_node *fib_node;
2596
2597 if (WARN_ON(!fib_entry))
2598 return -EINVAL;
2599
2600 fib_node = fib_entry->fib_node;
2601 list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
2602 if (fib_entry->params.tb_id != new_entry->params.tb_id ||
2603 fib_entry->params.tos != new_entry->params.tos ||
2604 fib_entry->params.prio != new_entry->params.prio)
2605 break;
2606 }
2607
2608 list_add_tail(&new_entry->list, &fib_entry->list);
2609 return 0;
2610}
2611
9aecce1c
IS
2612static int
2613mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
4283bce5 2614 struct mlxsw_sp_fib_entry *new_entry,
599cf8f9 2615 bool replace, bool append)
9aecce1c
IS
2616{
2617 struct mlxsw_sp_fib_entry *fib_entry;
2618
2619 fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);
2620
4283bce5
IS
2621 if (append)
2622 return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
599cf8f9
IS
2623 if (replace && WARN_ON(!fib_entry))
2624 return -EINVAL;
4283bce5 2625
599cf8f9
IS
2626 /* Insert the new entry before the replaced one, so that we can
2627 * later remove the replaced entry.
2628 */
9aecce1c
IS
2629 if (fib_entry) {
2630 list_add_tail(&new_entry->list, &fib_entry->list);
2631 } else {
2632 struct mlxsw_sp_fib_entry *last;
2633
2634 list_for_each_entry(last, &fib_node->entry_list, list) {
2635 if (new_entry->params.tb_id > last->params.tb_id)
2636 break;
2637 fib_entry = last;
2638 }
2639
2640 if (fib_entry)
2641 list_add(&new_entry->list, &fib_entry->list);
2642 else
2643 list_add(&new_entry->list, &fib_node->entry_list);
2644 }
2645
2646 return 0;
2647}
2648
2649static void
2650mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
2651{
2652 list_del(&fib_entry->list);
2653}
2654
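/* Only the first entry in a node's list is programmed into the device.
 * When a new entry becomes first, overwrite the previously offloaded one
 * to avoid packet loss and clear its offload indication.
 */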
2655static int
2656mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
2657 const struct mlxsw_sp_fib_node *fib_node,
2658 struct mlxsw_sp_fib_entry *fib_entry)
2659{
2660 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2661 return 0;
2662
2663 /* To prevent packet loss, overwrite the previously offloaded
2664 * entry.
2665 */
2666 if (!list_is_singular(&fib_node->entry_list)) {
2667 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2668 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2669
2670 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
2671 }
2672
2673 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2674}
2675
2676static void
2677mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
2678 const struct mlxsw_sp_fib_node *fib_node,
2679 struct mlxsw_sp_fib_entry *fib_entry)
2680{
2681 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2682 return;
2683
2684 /* Promote the next entry by overwriting the deleted entry */
2685 if (!list_is_singular(&fib_node->entry_list)) {
2686 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2687 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2688
2689 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
2690 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2691 return;
2692 }
2693
2694 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2695}
2696
2697static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4283bce5 2698 struct mlxsw_sp_fib_entry *fib_entry,
599cf8f9 2699 bool replace, bool append)
9aecce1c
IS
2700{
2701 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2702 int err;
2703
599cf8f9
IS
2704 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2705 append);
9aecce1c
IS
2706 if (err)
2707 return err;
2708
2709 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2710 if (err)
2711 goto err_fib4_node_entry_add;
2712
9aecce1c
IS
2713 return 0;
2714
2715err_fib4_node_entry_add:
2716 mlxsw_sp_fib4_node_list_remove(fib_entry);
2717 return err;
2718}
2719
2720static void
2721mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
2722 struct mlxsw_sp_fib_entry *fib_entry)
2723{
2724 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2725
9aecce1c
IS
2726 mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
2727 mlxsw_sp_fib4_node_list_remove(fib_entry);
2728}
2729
599cf8f9
IS
2730static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2731 struct mlxsw_sp_fib_entry *fib_entry,
2732 bool replace)
2733{
2734 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2735 struct mlxsw_sp_fib_entry *replaced;
2736
2737 if (!replace)
2738 return;
2739
2740 /* We inserted the new entry before the replaced one */
2741 replaced = list_next_entry(fib_entry, list);
2742
2743 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2744 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
731ea1ca 2745 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
599cf8f9
IS
2746}
2747
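/* Add an IPv4 route notified by the kernel: get (or create) the FIB node
 * for the prefix, create the entry and link it into the node, replacing
 * an existing entry when requested. No-op once the router has aborted.
 */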
9aecce1c
IS
2748static int
2749mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
4283bce5 2750 const struct fib_entry_notifier_info *fen_info,
599cf8f9 2751 bool replace, bool append)
9aecce1c
IS
2752{
2753 struct mlxsw_sp_fib_entry *fib_entry;
2754 struct mlxsw_sp_fib_node *fib_node;
61c503f9
JP
2755 int err;
2756
9011b677 2757 if (mlxsw_sp->router->aborted)
b45f64d1
JP
2758 return 0;
2759
731ea1ca
IS
2760 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
2761 &fen_info->dst, sizeof(fen_info->dst),
2762 fen_info->dst_len,
2763 MLXSW_SP_L3_PROTO_IPV4);
9aecce1c
IS
2764 if (IS_ERR(fib_node)) {
2765 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
2766 return PTR_ERR(fib_node);
b45f64d1 2767 }
61c503f9 2768
9aecce1c
IS
2769 fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
2770 if (IS_ERR(fib_entry)) {
2771 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
2772 err = PTR_ERR(fib_entry);
2773 goto err_fib4_entry_create;
2774 }
5b004412 2775
599cf8f9
IS
2776 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
2777 append);
b45f64d1 2778 if (err) {
9aecce1c
IS
2779 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
2780 goto err_fib4_node_entry_link;
b45f64d1 2781 }
9aecce1c 2782
599cf8f9
IS
2783 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);
2784
61c503f9
JP
2785 return 0;
2786
9aecce1c
IS
2787err_fib4_node_entry_link:
2788 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2789err_fib4_entry_create:
731ea1ca 2790 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
61c503f9
JP
2791 return err;
2792}
2793
37956d78
JP
2794static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2795 struct fib_entry_notifier_info *fen_info)
61c503f9 2796{
61c503f9 2797 struct mlxsw_sp_fib_entry *fib_entry;
9aecce1c 2798 struct mlxsw_sp_fib_node *fib_node;
61c503f9 2799
9011b677 2800 if (mlxsw_sp->router->aborted)
37956d78 2801 return;
b45f64d1 2802
9aecce1c
IS
2803 fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2804 if (WARN_ON(!fib_entry))
37956d78 2805 return;
9aecce1c 2806 fib_node = fib_entry->fib_node;
5b004412 2807
9aecce1c
IS
2808 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2809 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
731ea1ca 2810 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
61c503f9 2811}
b45f64d1 2812
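/* After an abort, bind a minimal LPM tree for the given protocol and
 * install a default (/0) IP2ME route in every used virtual router so all
 * routed traffic is trapped to the CPU.
 */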
bc65a8a4
IS
2813static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
2814 enum mlxsw_reg_ralxx_protocol proto,
2815 u8 tree_id)
b45f64d1
JP
2816{
2817 char ralta_pl[MLXSW_REG_RALTA_LEN];
2818 char ralst_pl[MLXSW_REG_RALST_LEN];
b5d90e6d 2819 int i, err;
b45f64d1 2820
bc65a8a4 2821 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
b45f64d1
JP
2822 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
2823 if (err)
2824 return err;
2825
bc65a8a4 2826 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
b45f64d1
JP
2827 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
2828 if (err)
2829 return err;
2830
b5d90e6d 2831 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 2832 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
b5d90e6d
IS
2833 char raltb_pl[MLXSW_REG_RALTB_LEN];
2834 char ralue_pl[MLXSW_REG_RALUE_LEN];
b45f64d1 2835
b5d90e6d
IS
2836 if (!mlxsw_sp_vr_is_used(vr))
2837 continue;
2838
bc65a8a4 2839 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
b5d90e6d
IS
2840 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
2841 raltb_pl);
2842 if (err)
2843 return err;
2844
bc65a8a4
IS
2845 mlxsw_reg_ralue_pack(ralue_pl, proto,
2846 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
b5d90e6d
IS
2847 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2848 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
2849 ralue_pl);
2850 if (err)
2851 return err;
2852 }
2853
2854 return 0;
b45f64d1
JP
2855}
2856
bc65a8a4
IS
2857static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
2858{
2859 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
2860 int err;
2861
2862 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
2863 MLXSW_SP_LPM_TREE_MIN);
2864 if (err)
2865 return err;
2866
2867 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
2868 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
2869 MLXSW_SP_LPM_TREE_MIN + 1);
2870}
2871
9aecce1c
IS
2872static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
2873 struct mlxsw_sp_fib_node *fib_node)
2874{
2875 struct mlxsw_sp_fib_entry *fib_entry, *tmp;
2876
2877 list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
2878 bool do_break = &tmp->list == &fib_node->entry_list;
2879
2880 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2881 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
731ea1ca 2882 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
9aecce1c
IS
2883 /* Break when the entry list is empty and the node was freed.
2884 * Otherwise, we'll access freed memory in the next
2885 * iteration.
2886 */
2887 if (do_break)
2888 break;
2889 }
2890}
2891
2892static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2893 struct mlxsw_sp_fib_node *fib_node)
2894{
76610ebb 2895 switch (fib_node->fib->proto) {
9aecce1c
IS
2896 case MLXSW_SP_L3_PROTO_IPV4:
2897 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2898 break;
2899 case MLXSW_SP_L3_PROTO_IPV6:
2900 WARN_ON_ONCE(1);
2901 break;
2902 }
2903}
2904
76610ebb
IS
2905static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2906 struct mlxsw_sp_vr *vr,
2907 enum mlxsw_sp_l3proto proto)
b45f64d1 2908{
76610ebb 2909 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
9aecce1c 2910 struct mlxsw_sp_fib_node *fib_node, *tmp;
76610ebb
IS
2911
2912 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2913 bool do_break = &tmp->list == &fib->node_list;
2914
2915 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2916 if (do_break)
2917 break;
2918 }
2919}
2920
2921static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
2922{
b45f64d1 2923 int i;
b45f64d1 2924
c1a38311 2925 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 2926 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
ac571de9 2927
76610ebb 2928 if (!mlxsw_sp_vr_is_used(vr))
b45f64d1 2929 continue;
76610ebb 2930 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
a3d9bc50
IS
2931
2932 /* If the virtual router was only used for IPv4, then it's no
2933 * longer used.
2934 */
2935 if (!mlxsw_sp_vr_is_used(vr))
2936 continue;
2937 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
b45f64d1 2938 }
ac571de9
IS
2939}
2940
bc65a8a4 2941static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
ac571de9
IS
2942{
2943 int err;
2944
9011b677 2945 if (mlxsw_sp->router->aborted)
d331d303
IS
2946 return;
2947 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
ac571de9 2948 mlxsw_sp_router_fib_flush(mlxsw_sp);
9011b677 2949 mlxsw_sp->router->aborted = true;
b45f64d1
JP
2950 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
2951 if (err)
2952 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
2953}
2954
3057224e 2955struct mlxsw_sp_fib_event_work {
a0e4761d 2956 struct work_struct work;
ad178c8e
IS
2957 union {
2958 struct fib_entry_notifier_info fen_info;
5d7bfd14 2959 struct fib_rule_notifier_info fr_info;
ad178c8e
IS
2960 struct fib_nh_notifier_info fnh_info;
2961 };
3057224e
IS
2962 struct mlxsw_sp *mlxsw_sp;
2963 unsigned long event;
2964};
2965
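/* Process a deferred FIB notification under RTNL: route replace/append/
 * add and delete, FIB rule add/delete (which abort offload unless the
 * rule is a default or l3mdev rule) and nexthop add/delete. References
 * taken at notification time are released here.
 */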
2966static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
b45f64d1 2967{
3057224e 2968 struct mlxsw_sp_fib_event_work *fib_work =
a0e4761d 2969 container_of(work, struct mlxsw_sp_fib_event_work, work);
3057224e 2970 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5d7bfd14 2971 struct fib_rule *rule;
599cf8f9 2972 bool replace, append;
b45f64d1
JP
2973 int err;
2974
3057224e
IS
2975 /* Protect internal structures from changes */
2976 rtnl_lock();
2977 switch (fib_work->event) {
599cf8f9 2978 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4283bce5 2979 case FIB_EVENT_ENTRY_APPEND: /* fall through */
b45f64d1 2980 case FIB_EVENT_ENTRY_ADD:
599cf8f9 2981 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
4283bce5
IS
2982 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
2983 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
599cf8f9 2984 replace, append);
b45f64d1 2985 if (err)
bc65a8a4 2986 mlxsw_sp_router_fib_abort(mlxsw_sp);
3057224e 2987 fib_info_put(fib_work->fen_info.fi);
b45f64d1
JP
2988 break;
2989 case FIB_EVENT_ENTRY_DEL:
3057224e
IS
2990 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
2991 fib_info_put(fib_work->fen_info.fi);
b45f64d1
JP
2992 break;
2993 case FIB_EVENT_RULE_ADD: /* fall through */
2994 case FIB_EVENT_RULE_DEL:
5d7bfd14 2995 rule = fib_work->fr_info.rule;
c7f6e665 2996 if (!fib4_rule_default(rule) && !rule->l3mdev)
bc65a8a4 2997 mlxsw_sp_router_fib_abort(mlxsw_sp);
5d7bfd14 2998 fib_rule_put(rule);
b45f64d1 2999 break;
ad178c8e
IS
3000 case FIB_EVENT_NH_ADD: /* fall through */
3001 case FIB_EVENT_NH_DEL:
3002 mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
3003 fib_work->fnh_info.fib_nh);
3004 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
3005 break;
b45f64d1 3006 }
3057224e
IS
3007 rtnl_unlock();
3008 kfree(fib_work);
3009}
3010
3011/* Called with rcu_read_lock() */
3012static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
3013 unsigned long event, void *ptr)
3014{
3057224e
IS
3015 struct mlxsw_sp_fib_event_work *fib_work;
3016 struct fib_notifier_info *info = ptr;
7e39d115 3017 struct mlxsw_sp_router *router;
3057224e
IS
3018
3019 if (!net_eq(info->net, &init_net))
3020 return NOTIFY_DONE;
3021
3022 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
3023 if (WARN_ON(!fib_work))
3024 return NOTIFY_BAD;
3025
a0e4761d 3026 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
7e39d115
IS
3027 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
3028 fib_work->mlxsw_sp = router->mlxsw_sp;
3057224e
IS
3029 fib_work->event = event;
3030
3031 switch (event) {
599cf8f9 3032 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4283bce5 3033 case FIB_EVENT_ENTRY_APPEND: /* fall through */
3057224e
IS
3034 case FIB_EVENT_ENTRY_ADD: /* fall through */
3035 case FIB_EVENT_ENTRY_DEL:
3036 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
3037 /* Take a reference on fib_info to prevent it from being
3038 * freed while work is queued. Release it afterwards.
3039 */
3040 fib_info_hold(fib_work->fen_info.fi);
3041 break;
5d7bfd14
IS
3042 case FIB_EVENT_RULE_ADD: /* fall through */
3043 case FIB_EVENT_RULE_DEL:
3044 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
3045 fib_rule_get(fib_work->fr_info.rule);
3046 break;
ad178c8e
IS
3047 case FIB_EVENT_NH_ADD: /* fall through */
3048 case FIB_EVENT_NH_DEL:
3049 memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
3050 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
3051 break;
3057224e
IS
3052 }
3053
a0e4761d 3054 mlxsw_core_schedule_work(&fib_work->work);
3057224e 3055
b45f64d1
JP
3056 return NOTIFY_DONE;
3057}
3058
4724ba56
IS
3059static struct mlxsw_sp_rif *
3060mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
3061 const struct net_device *dev)
3062{
3063 int i;
3064
3065 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
5f9efffb
IS
3066 if (mlxsw_sp->router->rifs[i] &&
3067 mlxsw_sp->router->rifs[i]->dev == dev)
3068 return mlxsw_sp->router->rifs[i];
4724ba56
IS
3069
3070 return NULL;
3071}
3072
3073static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
3074{
3075 char ritr_pl[MLXSW_REG_RITR_LEN];
3076 int err;
3077
3078 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
3079 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3080 if (WARN_ON_ONCE(err))
3081 return err;
3082
3083 mlxsw_reg_ritr_enable_set(ritr_pl, false);
3084 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3085}
3086
3087static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
bf95233e 3088 struct mlxsw_sp_rif *rif)
4724ba56 3089{
bf95233e
AS
3090 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
3091 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
3092 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
4724ba56
IS
3093}
3094
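/* Decide whether an address event should create or destroy a RIF: create
 * on NETDEV_UP if no RIF exists yet; destroy on NETDEV_DOWN only when
 * neither IPv4 nor IPv6 addresses remain and the netdev is not an L3
 * slave.
 */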
5ea1237f
AS
3095static bool
3096mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
3097 unsigned long event)
4724ba56 3098{
5ea1237f
AS
3099 struct inet6_dev *inet6_dev;
3100 bool addr_list_empty = true;
3101 struct in_device *idev;
3102
4724ba56
IS
3103 switch (event) {
3104 case NETDEV_UP:
bf95233e 3105 if (!rif)
4724ba56
IS
3106 return true;
3107 return false;
3108 case NETDEV_DOWN:
5ea1237f
AS
3109 idev = __in_dev_get_rtnl(dev);
3110 if (idev && idev->ifa_list)
3111 addr_list_empty = false;
3112
3113 inet6_dev = __in6_dev_get(dev);
3114 if (addr_list_empty && inet6_dev &&
3115 !list_empty(&inet6_dev->addr_list))
3116 addr_list_empty = false;
3117
3118 if (rif && addr_list_empty &&
bf95233e 3119 !netif_is_l3_slave(rif->dev))
4724ba56
IS
3120 return true;
3121 /* It is possible we already removed the RIF ourselves
3122 * if it was assigned to a netdev that is now a bridge
3123 * or LAG slave.
3124 */
3125 return false;
3126 }
3127
3128 return false;
3129}
3130
e4f3c1c1
IS
3131static enum mlxsw_sp_rif_type
3132mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
3133 const struct net_device *dev)
3134{
3135 enum mlxsw_sp_fid_type type;
3136
3137 /* RIF type is derived from the type of the underlying FID */
3138 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
3139 type = MLXSW_SP_FID_TYPE_8021Q;
3140 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
3141 type = MLXSW_SP_FID_TYPE_8021Q;
3142 else if (netif_is_bridge_master(dev))
3143 type = MLXSW_SP_FID_TYPE_8021D;
3144 else
3145 type = MLXSW_SP_FID_TYPE_RFID;
3146
3147 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
3148}
3149
de5ed99e 3150static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
4724ba56
IS
3151{
3152 int i;
3153
de5ed99e
IS
3154 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
3155 if (!mlxsw_sp->router->rifs[i]) {
3156 *p_rif_index = i;
3157 return 0;
3158 }
3159 }
4724ba56 3160
de5ed99e 3161 return -ENOBUFS;
4724ba56
IS
3162}
3163
e4f3c1c1
IS
3164static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
3165 u16 vr_id,
3166 struct net_device *l3_dev)
4724ba56 3167{
bf95233e 3168 struct mlxsw_sp_rif *rif;
4724ba56 3169
e4f3c1c1 3170 rif = kzalloc(rif_size, GFP_KERNEL);
bf95233e 3171 if (!rif)
4724ba56
IS
3172 return NULL;
3173
bf95233e
AS
3174 INIT_LIST_HEAD(&rif->nexthop_list);
3175 INIT_LIST_HEAD(&rif->neigh_list);
3176 ether_addr_copy(rif->addr, l3_dev->dev_addr);
3177 rif->mtu = l3_dev->mtu;
3178 rif->vr_id = vr_id;
3179 rif->dev = l3_dev;
3180 rif->rif_index = rif_index;
4724ba56 3181
bf95233e 3182 return rif;
4724ba56
IS
3183}
3184
5f9efffb
IS
3185struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
3186 u16 rif_index)
3187{
3188 return mlxsw_sp->router->rifs[rif_index];
3189}
3190
fd1b9d41
AS
3191u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
3192{
3193 return rif->rif_index;
3194}
3195
3196int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
3197{
3198 return rif->dev->ifindex;
3199}
3200
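/* Create a RIF for a netdev: derive the RIF type from the netdev, bind it
 * to a virtual router and FID, run the type-specific setup/configure ops
 * and program an FDB entry for the router MAC.
 */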
4724ba56 3201static struct mlxsw_sp_rif *
e4f3c1c1
IS
3202mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
3203 const struct mlxsw_sp_rif_params *params)
4724ba56 3204{
e4f3c1c1
IS
3205 u32 tb_id = l3mdev_fib_table(params->dev);
3206 const struct mlxsw_sp_rif_ops *ops;
3207 enum mlxsw_sp_rif_type type;
bf95233e 3208 struct mlxsw_sp_rif *rif;
a1107487
IS
3209 struct mlxsw_sp_fid *fid;
3210 struct mlxsw_sp_vr *vr;
3211 u16 rif_index;
4724ba56
IS
3212 int err;
3213
e4f3c1c1
IS
3214 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
3215 ops = mlxsw_sp->router->rif_ops_arr[type];
3216
c9ec53f0
IS
3217 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
3218 if (IS_ERR(vr))
3219 return ERR_CAST(vr);
3220
de5ed99e
IS
3221 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
3222 if (err)
3223 goto err_rif_index_alloc;
4724ba56 3224
e4f3c1c1 3225 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
a13a594d
IS
3226 if (!rif) {
3227 err = -ENOMEM;
3228 goto err_rif_alloc;
3229 }
e4f3c1c1
IS
3230 rif->mlxsw_sp = mlxsw_sp;
3231 rif->ops = ops;
a13a594d 3232
e4f3c1c1
IS
3233 fid = ops->fid_get(rif);
3234 if (IS_ERR(fid)) {
3235 err = PTR_ERR(fid);
3236 goto err_fid_get;
4d93ceeb 3237 }
e4f3c1c1 3238 rif->fid = fid;
4d93ceeb 3239
e4f3c1c1
IS
3240 if (ops->setup)
3241 ops->setup(rif, params);
3242
3243 err = ops->configure(rif);
4724ba56 3244 if (err)
e4f3c1c1 3245 goto err_configure;
4724ba56 3246
e4f3c1c1 3247 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
a1107487 3248 mlxsw_sp_fid_index(fid), true);
4724ba56
IS
3249 if (err)
3250 goto err_rif_fdb_op;
3251
e4f3c1c1 3252 mlxsw_sp_rif_counters_alloc(rif);
a1107487 3253 mlxsw_sp_fid_rif_set(fid, rif);
5f9efffb 3254 mlxsw_sp->router->rifs[rif_index] = rif;
6913229e 3255 vr->rif_count++;
4724ba56 3256
bf95233e 3257 return rif;
4724ba56 3258
4724ba56 3259err_rif_fdb_op:
e4f3c1c1
IS
3260 ops->deconfigure(rif);
3261err_configure:
a1107487
IS
3262 mlxsw_sp_fid_put(fid);
3263err_fid_get:
e4f3c1c1
IS
3264 kfree(rif);
3265err_rif_alloc:
de5ed99e 3266err_rif_index_alloc:
c9ec53f0 3267 mlxsw_sp_vr_put(vr);
4724ba56
IS
3268 return ERR_PTR(err);
3269}
3270
e4f3c1c1 3271void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
4724ba56 3272{
e4f3c1c1
IS
3273 const struct mlxsw_sp_rif_ops *ops = rif->ops;
3274 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
a1107487 3275 struct mlxsw_sp_fid *fid = rif->fid;
e4f3c1c1 3276 struct mlxsw_sp_vr *vr;
4724ba56 3277
bf95233e 3278 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
e4f3c1c1 3279 vr = &mlxsw_sp->router->vrs[rif->vr_id];
e0c0afd8 3280
6913229e 3281 vr->rif_count--;
e4f3c1c1 3282 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
a1107487 3283 mlxsw_sp_fid_rif_set(fid, NULL);
e4f3c1c1
IS
3284 mlxsw_sp_rif_counters_free(rif);
3285 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
3286 mlxsw_sp_fid_index(fid), false);
3287 ops->deconfigure(rif);
a1107487 3288 mlxsw_sp_fid_put(fid);
e4f3c1c1 3289 kfree(rif);
c9ec53f0 3290 mlxsw_sp_vr_put(vr);
4724ba56
IS
3291}
3292
e4f3c1c1
IS
3293static void
3294mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
3295 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
3296{
3297 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
3298
3299 params->vid = mlxsw_sp_port_vlan->vid;
3300 params->lag = mlxsw_sp_port->lagged;
3301 if (params->lag)
3302 params->lag_id = mlxsw_sp_port->lag_id;
3303 else
3304 params->system_port = mlxsw_sp_port->local_port;
3305}
3306
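/* Join a {port, VID} to the router: create a RIF for the L3 netdev if one
 * does not exist yet, map the port/VID to the RIF's FID and put the VID
 * into forwarding state with learning disabled.
 */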
7cbecf24 3307static int
a1107487 3308mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
7cbecf24 3309 struct net_device *l3_dev)
4724ba56 3310{
7cbecf24 3311 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1b8f09a0 3312 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
7cbecf24 3313 u16 vid = mlxsw_sp_port_vlan->vid;
bf95233e 3314 struct mlxsw_sp_rif *rif;
a1107487 3315 struct mlxsw_sp_fid *fid;
03ea01e9 3316 int err;
4724ba56 3317
1b8f09a0 3318 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
bf95233e 3319 if (!rif) {
e4f3c1c1
IS
3320 struct mlxsw_sp_rif_params params = {
3321 .dev = l3_dev,
3322 };
3323
3324 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
3325 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
bf95233e
AS
3326 if (IS_ERR(rif))
3327 return PTR_ERR(rif);
4724ba56
IS
3328 }
3329
a1107487 3330 /* The FID was already created, just take a reference */
e4f3c1c1 3331 fid = rif->ops->fid_get(rif);
a1107487
IS
3332 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
3333 if (err)
3334 goto err_fid_port_vid_map;
3335
7cbecf24 3336 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
03ea01e9
IS
3337 if (err)
3338 goto err_port_vid_learning_set;
3339
7cbecf24 3340 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
03ea01e9
IS
3341 BR_STATE_FORWARDING);
3342 if (err)
3343 goto err_port_vid_stp_set;
3344
a1107487 3345 mlxsw_sp_port_vlan->fid = fid;
4724ba56 3346
4724ba56 3347 return 0;
03ea01e9
IS
3348
3349err_port_vid_stp_set:
7cbecf24 3350 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
03ea01e9 3351err_port_vid_learning_set:
a1107487
IS
3352 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
3353err_fid_port_vid_map:
3354 mlxsw_sp_fid_put(fid);
03ea01e9 3355 return err;
4724ba56
IS
3356}
3357
a1107487
IS
3358void
3359mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
4724ba56 3360{
ce95e154 3361 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7cbecf24 3362 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
ce95e154 3363 u16 vid = mlxsw_sp_port_vlan->vid;
ce95e154 3364
a1107487
IS
3365 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
3366 return;
4aafc368 3367
a1107487 3368 mlxsw_sp_port_vlan->fid = NULL;
7cbecf24
IS
3369 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
3370 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
a1107487
IS
3371 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
3372 /* If the router port holds the last reference on the rFID, then the
3373 * associated Sub-port RIF will be destroyed.
3374 */
3375 mlxsw_sp_fid_put(fid);
4724ba56
IS
3376}
3377
7cbecf24
IS
3378static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
3379 struct net_device *port_dev,
3380 unsigned long event, u16 vid)
4724ba56
IS
3381{
3382 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
ce95e154 3383 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4724ba56 3384
ce95e154 3385 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
7cbecf24
IS
3386 if (WARN_ON(!mlxsw_sp_port_vlan))
3387 return -EINVAL;
4724ba56
IS
3388
3389 switch (event) {
3390 case NETDEV_UP:
a1107487 3391 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
7cbecf24 3392 l3_dev);
4724ba56 3393 case NETDEV_DOWN:
a1107487 3394 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4724ba56
IS
3395 break;
3396 }
3397
3398 return 0;
3399}
3400
3401static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3402 unsigned long event)
3403{
2b94e58d
JP
3404 if (netif_is_bridge_port(port_dev) ||
3405 netif_is_lag_port(port_dev) ||
3406 netif_is_ovs_port(port_dev))
4724ba56
IS
3407 return 0;
3408
7cbecf24 3409 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
4724ba56
IS
3410}
3411
3412static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3413 struct net_device *lag_dev,
3414 unsigned long event, u16 vid)
3415{
3416 struct net_device *port_dev;
3417 struct list_head *iter;
3418 int err;
3419
3420 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3421 if (mlxsw_sp_port_dev_check(port_dev)) {
7cbecf24
IS
3422 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
3423 port_dev,
3424 event, vid);
4724ba56
IS
3425 if (err)
3426 return err;
3427 }
3428 }
3429
3430 return 0;
3431}
3432
3433static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
3434 unsigned long event)
3435{
3436 if (netif_is_bridge_port(lag_dev))
3437 return 0;
3438
3439 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
3440}
3441
4724ba56 3442static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
4724ba56
IS
3443 unsigned long event)
3444{
3445 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
e4f3c1c1
IS
3446 struct mlxsw_sp_rif_params params = {
3447 .dev = l3_dev,
3448 };
a1107487 3449 struct mlxsw_sp_rif *rif;
4724ba56
IS
3450
3451 switch (event) {
3452 case NETDEV_UP:
e4f3c1c1
IS
3453 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
3454 if (IS_ERR(rif))
3455 return PTR_ERR(rif);
3456 break;
4724ba56 3457 case NETDEV_DOWN:
a1107487 3458 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
e4f3c1c1 3459 mlxsw_sp_rif_destroy(rif);
4724ba56
IS
3460 break;
3461 }
3462
3463 return 0;
3464}
3465
3466static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3467 unsigned long event)
3468{
3469 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4724ba56
IS
3470 u16 vid = vlan_dev_vlan_id(vlan_dev);
3471
6b27c8ad
IS
3472 if (netif_is_bridge_port(vlan_dev))
3473 return 0;
3474
4724ba56 3475 if (mlxsw_sp_port_dev_check(real_dev))
7cbecf24
IS
3476 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
3477 event, vid);
4724ba56
IS
3478 else if (netif_is_lag_master(real_dev))
3479 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3480 vid);
c57529e1 3481 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
a1107487 3482 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
4724ba56
IS
3483
3484 return 0;
3485}
3486
b1e45526
IS
3487static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
3488 unsigned long event)
3489{
3490 if (mlxsw_sp_port_dev_check(dev))
3491 return mlxsw_sp_inetaddr_port_event(dev, event);
3492 else if (netif_is_lag_master(dev))
3493 return mlxsw_sp_inetaddr_lag_event(dev, event);
3494 else if (netif_is_bridge_master(dev))
a1107487 3495 return mlxsw_sp_inetaddr_bridge_event(dev, event);
b1e45526
IS
3496 else if (is_vlan_dev(dev))
3497 return mlxsw_sp_inetaddr_vlan_event(dev, event);
3498 else
3499 return 0;
3500}
3501
4724ba56
IS
3502int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3503 unsigned long event, void *ptr)
3504{
3505 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3506 struct net_device *dev = ifa->ifa_dev->dev;
3507 struct mlxsw_sp *mlxsw_sp;
bf95233e 3508 struct mlxsw_sp_rif *rif;
4724ba56
IS
3509 int err = 0;
3510
3511 mlxsw_sp = mlxsw_sp_lower_get(dev);
3512 if (!mlxsw_sp)
3513 goto out;
3514
bf95233e 3515 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
5ea1237f 3516 if (!mlxsw_sp_rif_should_config(rif, dev, event))
4724ba56
IS
3517 goto out;
3518
b1e45526 3519 err = __mlxsw_sp_inetaddr_event(dev, event);
4724ba56
IS
3520out:
3521 return notifier_from_errno(err);
3522}
3523
5ea1237f
AS
3524struct mlxsw_sp_inet6addr_event_work {
3525 struct work_struct work;
3526 struct net_device *dev;
3527 unsigned long event;
3528};
3529
3530static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
3531{
3532 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
3533 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
3534 struct net_device *dev = inet6addr_work->dev;
3535 unsigned long event = inet6addr_work->event;
3536 struct mlxsw_sp *mlxsw_sp;
3537 struct mlxsw_sp_rif *rif;
3538
3539 rtnl_lock();
3540 mlxsw_sp = mlxsw_sp_lower_get(dev);
3541 if (!mlxsw_sp)
3542 goto out;
3543
3544 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3545 if (!mlxsw_sp_rif_should_config(rif, dev, event))
3546 goto out;
3547
3548 __mlxsw_sp_inetaddr_event(dev, event);
3549out:
3550 rtnl_unlock();
3551 dev_put(dev);
3552 kfree(inet6addr_work);
3553}
3554
3555/* Called with rcu_read_lock() */
3556int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
3557 unsigned long event, void *ptr)
3558{
3559 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
3560 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
3561 struct net_device *dev = if6->idev->dev;
3562
3563 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
3564 return NOTIFY_DONE;
3565
3566 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
3567 if (!inet6addr_work)
3568 return NOTIFY_BAD;
3569
3570 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
3571 inet6addr_work->dev = dev;
3572 inet6addr_work->event = event;
3573 dev_hold(dev);
3574 mlxsw_core_schedule_work(&inet6addr_work->work);
3575
3576 return NOTIFY_DONE;
3577}
3578
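/* The inet6addr notifier chain runs in atomic context (hence the
 * rcu_read_lock() note above), so the handler defers the actual RIF update
 * to a work item that runs under RTNL. A minimal sketch of the same
 * deferral pattern for a hypothetical driver "foo"; the foo_* names are
 * assumptions, while the kernel APIs used (INIT_WORK, dev_hold,
 * schedule_work, ...) are the standard ones:
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/if_inet6.h>

struct foo_inet6addr_work {
	struct work_struct work;
	struct net_device *dev;
	unsigned long event;
};

static void foo_inet6addr_work_fn(struct work_struct *work)
{
	struct foo_inet6addr_work *w =
		container_of(work, struct foo_inet6addr_work, work);

	rtnl_lock();		/* process context now, sleeping is allowed */
	/* ... reconfigure routing state for w->dev according to w->event ... */
	rtnl_unlock();
	dev_put(w->dev);	/* drop the reference taken in the notifier */
	kfree(w);
}

static int foo_inet6addr_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = ptr;
	struct foo_inet6addr_work *w;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* atomic context: no GFP_KERNEL */
	if (!w)
		return NOTIFY_BAD;

	INIT_WORK(&w->work, foo_inet6addr_work_fn);
	w->dev = if6->idev->dev;
	w->event = event;
	dev_hold(w->dev);	/* keep the netdev alive until the work runs */
	schedule_work(&w->work);

	return NOTIFY_DONE;
}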
bf95233e 3579static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
4724ba56
IS
3580 const char *mac, int mtu)
3581{
3582 char ritr_pl[MLXSW_REG_RITR_LEN];
3583 int err;
3584
bf95233e 3585 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
4724ba56
IS
3586 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3587 if (err)
3588 return err;
3589
3590 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3591 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3592 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3593 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3594}
3595
3596int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3597{
3598 struct mlxsw_sp *mlxsw_sp;
bf95233e 3599 struct mlxsw_sp_rif *rif;
a1107487 3600 u16 fid_index;
4724ba56
IS
3601 int err;
3602
3603 mlxsw_sp = mlxsw_sp_lower_get(dev);
3604 if (!mlxsw_sp)
3605 return 0;
3606
bf95233e
AS
3607 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3608 if (!rif)
4724ba56 3609 return 0;
a1107487 3610 fid_index = mlxsw_sp_fid_index(rif->fid);
4724ba56 3611
a1107487 3612 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
4724ba56
IS
3613 if (err)
3614 return err;
3615
bf95233e
AS
3616 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
3617 dev->mtu);
4724ba56
IS
3618 if (err)
3619 goto err_rif_edit;
3620
a1107487 3621 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
4724ba56
IS
3622 if (err)
3623 goto err_rif_fdb_op;
3624
bf95233e
AS
3625 ether_addr_copy(rif->addr, dev->dev_addr);
3626 rif->mtu = dev->mtu;
4724ba56 3627
bf95233e 3628 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
4724ba56
IS
3629
3630 return 0;
3631
3632err_rif_fdb_op:
bf95233e 3633 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
4724ba56 3634err_rif_edit:
a1107487 3635 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
4724ba56
IS
3636 return err;
3637}
3638
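/* mlxsw_sp_netdevice_router_port_event() re-programs the RIF backing a
 * netdev after its MAC address or MTU changes; its caller lives outside this
 * file. A sketch of how a netdevice notifier might route such events to it
 * (the handler below is an assumption for illustration, not the driver's
 * actual notifier):
 */
static int foo_netdevice_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	switch (event) {
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGEMTU:
		/* Update the RIF's MAC/MTU, if this netdev has one. */
		err = mlxsw_sp_netdevice_router_port_event(dev);
		break;
	}

	return notifier_from_errno(err);
}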
b1e45526
IS
3639static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
3640 struct net_device *l3_dev)
7179eb5a 3641{
b1e45526 3642 struct mlxsw_sp_rif *rif;
7179eb5a 3643
b1e45526
IS
3644 /* If netdev is already associated with a RIF, then we need to
3645 * destroy it and create a new one with the new virtual router ID.
7179eb5a 3646 */
b1e45526
IS
3647 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3648 if (rif)
3649 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
7179eb5a 3650
b1e45526 3651 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
7179eb5a
IS
3652}
3653
b1e45526
IS
3654static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3655 struct net_device *l3_dev)
7179eb5a 3656{
b1e45526 3657 struct mlxsw_sp_rif *rif;
7179eb5a 3658
b1e45526
IS
3659 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3660 if (!rif)
7179eb5a 3661 return;
b1e45526 3662 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
7179eb5a
IS
3663}
3664
b1e45526
IS
3665int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
3666 struct netdev_notifier_changeupper_info *info)
3d70e458 3667{
b1e45526
IS
3668 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3669 int err = 0;
3d70e458 3670
b1e45526
IS
3671 if (!mlxsw_sp)
3672 return 0;
3d70e458 3673
b1e45526
IS
3674 switch (event) {
3675 case NETDEV_PRECHANGEUPPER:
3676 return 0;
3677 case NETDEV_CHANGEUPPER:
3678 if (info->linking)
3679 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
3680 else
3681 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
3682 break;
3683 }
3d70e458 3684
b1e45526 3685 return err;
3d70e458
IS
3686}
3687
e4f3c1c1
IS
3688static struct mlxsw_sp_rif_subport *
3689mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
a1107487 3690{
e4f3c1c1
IS
3691 return container_of(rif, struct mlxsw_sp_rif_subport, common);
3692}
3693
3694static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
3695 const struct mlxsw_sp_rif_params *params)
3696{
3697 struct mlxsw_sp_rif_subport *rif_subport;
3698
3699 rif_subport = mlxsw_sp_rif_subport_rif(rif);
3700 rif_subport->vid = params->vid;
3701 rif_subport->lag = params->lag;
3702 if (params->lag)
3703 rif_subport->lag_id = params->lag_id;
a1107487 3704 else
e4f3c1c1
IS
3705 rif_subport->system_port = params->system_port;
3706}
3707
3708static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
3709{
3710 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3711 struct mlxsw_sp_rif_subport *rif_subport;
3712 char ritr_pl[MLXSW_REG_RITR_LEN];
3713
3714 rif_subport = mlxsw_sp_rif_subport_rif(rif);
3715 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
3716 rif->rif_index, rif->vr_id, rif->dev->mtu,
3717 rif->dev->dev_addr);
3718 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
3719 rif_subport->lag ? rif_subport->lag_id :
3720 rif_subport->system_port,
3721 rif_subport->vid);
3722
3723 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3724}
3725
3726static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
3727{
3728 return mlxsw_sp_rif_subport_op(rif, true);
a1107487
IS
3729}
3730
e4f3c1c1
IS
3731static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
3732{
3733 mlxsw_sp_rif_subport_op(rif, false);
3734}
3735
3736static struct mlxsw_sp_fid *
3737mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
3738{
3739 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
3740}
3741
3742static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
3743 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
3744 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
3745 .setup = mlxsw_sp_rif_subport_setup,
3746 .configure = mlxsw_sp_rif_subport_configure,
3747 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
3748 .fid_get = mlxsw_sp_rif_subport_fid_get,
3749};
3750
3751static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
3752 enum mlxsw_reg_ritr_if_type type,
3753 u16 vid_fid, bool enable)
3754{
3755 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3756 char ritr_pl[MLXSW_REG_RITR_LEN];
3757
3758 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
3759 rif->dev->mtu, rif->dev->dev_addr);
3760 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
3761
3762 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3763}
3764
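/* The "router port" below is modelled as one port number past the last
 * front-panel port; presumably it is registered as a member of the FID's
 * MC/BC flood tables in the configure callbacks that follow, so flooded
 * traffic within the FID also reaches the router.
 */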
3765static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
3766{
3767 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
3768}
3769
3770static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
3771{
3772 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3773 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
3774 int err;
3775
3776 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
3777 if (err)
3778 return err;
3779
0d284818
IS
3780 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
3781 mlxsw_sp_router_port(mlxsw_sp), true);
3782 if (err)
3783 goto err_fid_mc_flood_set;
3784
e4f3c1c1
IS
3785 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3786 mlxsw_sp_router_port(mlxsw_sp), true);
3787 if (err)
3788 goto err_fid_bc_flood_set;
3789
3790 return 0;
3791
3792err_fid_bc_flood_set:
0d284818
IS
3793 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
3794 mlxsw_sp_router_port(mlxsw_sp), false);
3795err_fid_mc_flood_set:
e4f3c1c1
IS
3796 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
3797 return err;
3798}
3799
3800static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
3801{
3802 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3803 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
3804
3805 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3806 mlxsw_sp_router_port(mlxsw_sp), false);
0d284818
IS
3807 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
3808 mlxsw_sp_router_port(mlxsw_sp), false);
e4f3c1c1
IS
3809 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
3810}
3811
3812static struct mlxsw_sp_fid *
3813mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
3814{
3815 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
3816
3817 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
3818}
3819
3820static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
3821 .type = MLXSW_SP_RIF_TYPE_VLAN,
3822 .rif_size = sizeof(struct mlxsw_sp_rif),
3823 .configure = mlxsw_sp_rif_vlan_configure,
3824 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
3825 .fid_get = mlxsw_sp_rif_vlan_fid_get,
3826};
3827
3828static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
3829{
3830 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3831 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
3832 int err;
3833
3834 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
3835 true);
3836 if (err)
3837 return err;
3838
0d284818
IS
3839 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
3840 mlxsw_sp_router_port(mlxsw_sp), true);
3841 if (err)
3842 goto err_fid_mc_flood_set;
3843
e4f3c1c1
IS
3844 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3845 mlxsw_sp_router_port(mlxsw_sp), true);
3846 if (err)
3847 goto err_fid_bc_flood_set;
3848
3849 return 0;
3850
3851err_fid_bc_flood_set:
0d284818
IS
3852 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
3853 mlxsw_sp_router_port(mlxsw_sp), false);
3854err_fid_mc_flood_set:
e4f3c1c1
IS
3855 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
3856 return err;
3857}
3858
3859static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
3860{
3861 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3862 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
3863
3864 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3865 mlxsw_sp_router_port(mlxsw_sp), false);
0d284818
IS
3866 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
3867 mlxsw_sp_router_port(mlxsw_sp), false);
e4f3c1c1
IS
3868 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
3869}
3870
3871static struct mlxsw_sp_fid *
3872mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
3873{
3874 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
3875}
3876
3877static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
3878 .type = MLXSW_SP_RIF_TYPE_FID,
3879 .rif_size = sizeof(struct mlxsw_sp_rif),
3880 .configure = mlxsw_sp_rif_fid_configure,
3881 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
3882 .fid_get = mlxsw_sp_rif_fid_fid_get,
3883};
3884
3885static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
3886 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
3887 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
3888 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
3889};
3890
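/* Each RIF type supplies a struct mlxsw_sp_rif_ops, and the array above is
 * indexed by RIF type; mlxsw_sp_rifs_init() below publishes it as
 * router->rif_ops_arr. A rough sketch of how a creation path could dispatch
 * through the table; mlxsw_sp_rif_create() itself is outside this excerpt,
 * so the helper below is an assumption, not the driver's actual code:
 */
static int foo_rif_configure(struct mlxsw_sp *mlxsw_sp,
			     enum mlxsw_sp_rif_type type,
			     struct mlxsw_sp_rif *rif,
			     const struct mlxsw_sp_rif_params *params)
{
	const struct mlxsw_sp_rif_ops *ops = mlxsw_sp->router->rif_ops_arr[type];

	if (ops->setup)			/* only the sub-port type provides it */
		ops->setup(rif, params);
	return ops->configure(rif);	/* undone later via ops->deconfigure() */
}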
348b8fc3
IS
3891static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
3892{
3893 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
3894
3895 mlxsw_sp->router->rifs = kcalloc(max_rifs,
3896 sizeof(struct mlxsw_sp_rif *),
3897 GFP_KERNEL);
3898 if (!mlxsw_sp->router->rifs)
3899 return -ENOMEM;
e4f3c1c1
IS
3900
3901 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
3902
348b8fc3
IS
3903 return 0;
3904}
3905
3906static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
3907{
3908 int i;
3909
3910 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3911 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
3912
3913 kfree(mlxsw_sp->router->rifs);
3914}
3915
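/* mlxsw_sp_rifs_init() above sizes the rifs[] pointer array from the
 * MAX_RIFS resource, so a RIF can be looked up in O(1) by its index. A
 * sketch of such a helper (hypothetical here, not quoted from the driver):
 */
static struct mlxsw_sp_rif *
foo_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index)
{
	if (WARN_ON(rif_index >= MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS)))
		return NULL;
	return mlxsw_sp->router->rifs[rif_index];
}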
c3852ef7
IS
3916static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3917{
7e39d115 3918 struct mlxsw_sp_router *router;
c3852ef7
IS
3919
3920 /* Flush pending FIB notifications and then flush the device's
3921 * table before requesting another dump. The FIB notification
3922 * block is unregistered, so no need to take RTNL.
3923 */
3924 mlxsw_core_flush_owq();
7e39d115
IS
3925 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
3926 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
c3852ef7
IS
3927}
3928
4724ba56
IS
3929static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3930{
3931 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3932 u64 max_rifs;
3933 int err;
3934
3935 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
3936 return -EIO;
4724ba56 3937 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
4724ba56 3938
e29237e7 3939 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
4724ba56
IS
3940 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
3941 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3942 if (err)
348b8fc3 3943 return err;
4724ba56 3944 return 0;
4724ba56
IS
3945}
3946
3947static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3948{
3949 char rgcr_pl[MLXSW_REG_RGCR_LEN];
4724ba56 3950
e29237e7 3951 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
4724ba56 3952 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
4724ba56
IS
3953}
3954
b45f64d1
JP
3955int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3956{
9011b677 3957 struct mlxsw_sp_router *router;
b45f64d1
JP
3958 int err;
3959
9011b677
IS
3960 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
3961 if (!router)
3962 return -ENOMEM;
3963 mlxsw_sp->router = router;
3964 router->mlxsw_sp = mlxsw_sp;
3965
3966 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
b45f64d1
JP
3967 err = __mlxsw_sp_router_init(mlxsw_sp);
3968 if (err)
9011b677 3969 goto err_router_init;
b45f64d1 3970
348b8fc3
IS
3971 err = mlxsw_sp_rifs_init(mlxsw_sp);
3972 if (err)
3973 goto err_rifs_init;
3974
9011b677 3975 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
c53b8e1b
IS
3976 &mlxsw_sp_nexthop_ht_params);
3977 if (err)
3978 goto err_nexthop_ht_init;
3979
9011b677 3980 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
e9ad5e7d
IS
3981 &mlxsw_sp_nexthop_group_ht_params);
3982 if (err)
3983 goto err_nexthop_group_ht_init;
3984
8494ab06
IS
3985 err = mlxsw_sp_lpm_init(mlxsw_sp);
3986 if (err)
3987 goto err_lpm_init;
3988
b45f64d1
JP
3989 err = mlxsw_sp_vrs_init(mlxsw_sp);
3990 if (err)
3991 goto err_vrs_init;
3992
8c9583a8 3993 err = mlxsw_sp_neigh_init(mlxsw_sp);
b45f64d1
JP
3994 if (err)
3995 goto err_neigh_init;
3996
7e39d115
IS
3997 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
3998 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
c3852ef7
IS
3999 mlxsw_sp_router_fib_dump_flush);
4000 if (err)
4001 goto err_register_fib_notifier;
4002
b45f64d1
JP
4003 return 0;
4004
c3852ef7
IS
4005err_register_fib_notifier:
4006 mlxsw_sp_neigh_fini(mlxsw_sp);
b45f64d1
JP
4007err_neigh_init:
4008 mlxsw_sp_vrs_fini(mlxsw_sp);
4009err_vrs_init:
8494ab06
IS
4010 mlxsw_sp_lpm_fini(mlxsw_sp);
4011err_lpm_init:
9011b677 4012 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
e9ad5e7d 4013err_nexthop_group_ht_init:
9011b677 4014 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
c53b8e1b 4015err_nexthop_ht_init:
348b8fc3
IS
4016 mlxsw_sp_rifs_fini(mlxsw_sp);
4017err_rifs_init:
b45f64d1 4018 __mlxsw_sp_router_fini(mlxsw_sp);
9011b677
IS
4019err_router_init:
4020 kfree(mlxsw_sp->router);
b45f64d1
JP
4021 return err;
4022}
4023
4024void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
4025{
7e39d115 4026 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
b45f64d1
JP
4027 mlxsw_sp_neigh_fini(mlxsw_sp);
4028 mlxsw_sp_vrs_fini(mlxsw_sp);
8494ab06 4029 mlxsw_sp_lpm_fini(mlxsw_sp);
9011b677
IS
4030 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
4031 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
348b8fc3 4032 mlxsw_sp_rifs_fini(mlxsw_sp);
b45f64d1 4033 __mlxsw_sp_router_fini(mlxsw_sp);
9011b677 4034 kfree(mlxsw_sp->router);
b45f64d1 4035}