]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
mlxsw: spectrum_router: Add missing rollback
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_router.c
CommitLineData
464dce18
IS
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
c723c735 6 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
464dce18
IS
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
5e9c16cc
JP
39#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
c723c735 42#include <linux/notifier.h>
df6dd79b 43#include <linux/inetdevice.h>
9db032bb 44#include <linux/netdevice.h>
03ea01e9 45#include <linux/if_bridge.h>
c723c735 46#include <net/netevent.h>
6cf3c971
JP
47#include <net/neighbour.h>
48#include <net/arp.h>
b45f64d1 49#include <net/ip_fib.h>
5d7bfd14 50#include <net/fib_rules.h>
57837885 51#include <net/l3mdev.h>
464dce18
IS
52
53#include "spectrum.h"
54#include "core.h"
55#include "reg.h"
e0c0afd8
AS
56#include "spectrum_cnt.h"
57#include "spectrum_dpipe.h"
58#include "spectrum_router.h"
464dce18 59
9011b677
IS
60struct mlxsw_sp_vr;
61struct mlxsw_sp_lpm_tree;
e4f3c1c1 62struct mlxsw_sp_rif_ops;
9011b677
IS
63
64struct mlxsw_sp_router {
65 struct mlxsw_sp *mlxsw_sp;
5f9efffb 66 struct mlxsw_sp_rif **rifs;
9011b677
IS
67 struct mlxsw_sp_vr *vrs;
68 struct rhashtable neigh_ht;
69 struct rhashtable nexthop_group_ht;
70 struct rhashtable nexthop_ht;
71 struct {
72 struct mlxsw_sp_lpm_tree *trees;
73 unsigned int tree_count;
74 } lpm;
75 struct {
76 struct delayed_work dw;
77 unsigned long interval; /* ms */
78 } neighs_update;
79 struct delayed_work nexthop_probe_dw;
80#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
81 struct list_head nexthop_neighs_list;
82 bool aborted;
7e39d115 83 struct notifier_block fib_nb;
e4f3c1c1 84 const struct mlxsw_sp_rif_ops **rif_ops_arr;
9011b677
IS
85};
86
4724ba56
IS
87struct mlxsw_sp_rif {
88 struct list_head nexthop_list;
89 struct list_head neigh_list;
90 struct net_device *dev;
a1107487 91 struct mlxsw_sp_fid *fid;
4724ba56
IS
92 unsigned char addr[ETH_ALEN];
93 int mtu;
bf95233e 94 u16 rif_index;
6913229e 95 u16 vr_id;
e4f3c1c1
IS
96 const struct mlxsw_sp_rif_ops *ops;
97 struct mlxsw_sp *mlxsw_sp;
98
e0c0afd8
AS
99 unsigned int counter_ingress;
100 bool counter_ingress_valid;
101 unsigned int counter_egress;
102 bool counter_egress_valid;
4724ba56
IS
103};
104
e4f3c1c1
IS
105struct mlxsw_sp_rif_params {
106 struct net_device *dev;
107 union {
108 u16 system_port;
109 u16 lag_id;
110 };
111 u16 vid;
112 bool lag;
113};
114
4d93ceeb
IS
115struct mlxsw_sp_rif_subport {
116 struct mlxsw_sp_rif common;
117 union {
118 u16 system_port;
119 u16 lag_id;
120 };
121 u16 vid;
122 bool lag;
123};
124
e4f3c1c1
IS
125struct mlxsw_sp_rif_ops {
126 enum mlxsw_sp_rif_type type;
127 size_t rif_size;
128
129 void (*setup)(struct mlxsw_sp_rif *rif,
130 const struct mlxsw_sp_rif_params *params);
131 int (*configure)(struct mlxsw_sp_rif *rif);
132 void (*deconfigure)(struct mlxsw_sp_rif *rif);
133 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
134};
135
e0c0afd8
AS
136static unsigned int *
137mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
138 enum mlxsw_sp_rif_counter_dir dir)
139{
140 switch (dir) {
141 case MLXSW_SP_RIF_COUNTER_EGRESS:
142 return &rif->counter_egress;
143 case MLXSW_SP_RIF_COUNTER_INGRESS:
144 return &rif->counter_ingress;
145 }
146 return NULL;
147}
148
149static bool
150mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
151 enum mlxsw_sp_rif_counter_dir dir)
152{
153 switch (dir) {
154 case MLXSW_SP_RIF_COUNTER_EGRESS:
155 return rif->counter_egress_valid;
156 case MLXSW_SP_RIF_COUNTER_INGRESS:
157 return rif->counter_ingress_valid;
158 }
159 return false;
160}
161
162static void
163mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
164 enum mlxsw_sp_rif_counter_dir dir,
165 bool valid)
166{
167 switch (dir) {
168 case MLXSW_SP_RIF_COUNTER_EGRESS:
169 rif->counter_egress_valid = valid;
170 break;
171 case MLXSW_SP_RIF_COUNTER_INGRESS:
172 rif->counter_ingress_valid = valid;
173 break;
174 }
175}
176
177static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
178 unsigned int counter_index, bool enable,
179 enum mlxsw_sp_rif_counter_dir dir)
180{
181 char ritr_pl[MLXSW_REG_RITR_LEN];
182 bool is_egress = false;
183 int err;
184
185 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
186 is_egress = true;
187 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
188 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
189 if (err)
190 return err;
191
192 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
193 is_egress);
194 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
195}
196
197int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
198 struct mlxsw_sp_rif *rif,
199 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
200{
201 char ricnt_pl[MLXSW_REG_RICNT_LEN];
202 unsigned int *p_counter_index;
203 bool valid;
204 int err;
205
206 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
207 if (!valid)
208 return -EINVAL;
209
210 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
211 if (!p_counter_index)
212 return -EINVAL;
213 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
214 MLXSW_REG_RICNT_OPCODE_NOP);
215 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
216 if (err)
217 return err;
218 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
219 return 0;
220}
221
222static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
223 unsigned int counter_index)
224{
225 char ricnt_pl[MLXSW_REG_RICNT_LEN];
226
227 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
228 MLXSW_REG_RICNT_OPCODE_CLEAR);
229 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
230}
231
232int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
233 struct mlxsw_sp_rif *rif,
234 enum mlxsw_sp_rif_counter_dir dir)
235{
236 unsigned int *p_counter_index;
237 int err;
238
239 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
240 if (!p_counter_index)
241 return -EINVAL;
242 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
243 p_counter_index);
244 if (err)
245 return err;
246
247 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
248 if (err)
249 goto err_counter_clear;
250
251 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
252 *p_counter_index, true, dir);
253 if (err)
254 goto err_counter_edit;
255 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
256 return 0;
257
258err_counter_edit:
259err_counter_clear:
260 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
261 *p_counter_index);
262 return err;
263}
264
265void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
266 struct mlxsw_sp_rif *rif,
267 enum mlxsw_sp_rif_counter_dir dir)
268{
269 unsigned int *p_counter_index;
270
6b1206bb
AS
271 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
272 return;
273
e0c0afd8
AS
274 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
275 if (WARN_ON(!p_counter_index))
276 return;
277 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
278 *p_counter_index, false, dir);
279 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
280 *p_counter_index);
281 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
282}
283
e4f3c1c1
IS
284static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
285{
286 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
287 struct devlink *devlink;
288
289 devlink = priv_to_devlink(mlxsw_sp->core);
290 if (!devlink_dpipe_table_counter_enabled(devlink,
291 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
292 return;
293 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
294}
295
296static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
297{
298 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
299
300 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
301}
302
4724ba56
IS
303static struct mlxsw_sp_rif *
304mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
305 const struct net_device *dev);
306
9011b677
IS
307#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
308
309struct mlxsw_sp_prefix_usage {
310 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
311};
312
53342023
JP
313#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
314 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
315
6b75c480
JP
316static bool
317mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
318 struct mlxsw_sp_prefix_usage *prefix_usage2)
319{
320 unsigned char prefix;
321
322 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
323 if (!test_bit(prefix, prefix_usage2->b))
324 return false;
325 }
326 return true;
327}
328
53342023
JP
329static bool
330mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
331 struct mlxsw_sp_prefix_usage *prefix_usage2)
332{
333 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
334}
335
6b75c480
JP
336static bool
337mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
338{
339 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
340
341 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
342}
343
344static void
345mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
346 struct mlxsw_sp_prefix_usage *prefix_usage2)
347{
348 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
349}
350
5e9c16cc
JP
351static void
352mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
353 unsigned char prefix_len)
354{
355 set_bit(prefix_len, prefix_usage->b);
356}
357
358static void
359mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
360 unsigned char prefix_len)
361{
362 clear_bit(prefix_len, prefix_usage->b);
363}
364
365struct mlxsw_sp_fib_key {
366 unsigned char addr[sizeof(struct in6_addr)];
367 unsigned char prefix_len;
368};
369
61c503f9
JP
370enum mlxsw_sp_fib_entry_type {
371 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
372 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
373 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
374};
375
a7ff87ac 376struct mlxsw_sp_nexthop_group;
9011b677 377struct mlxsw_sp_fib;
a7ff87ac 378
9aecce1c
IS
379struct mlxsw_sp_fib_node {
380 struct list_head entry_list;
b45f64d1 381 struct list_head list;
9aecce1c 382 struct rhash_head ht_node;
76610ebb 383 struct mlxsw_sp_fib *fib;
5e9c16cc 384 struct mlxsw_sp_fib_key key;
9aecce1c
IS
385};
386
387struct mlxsw_sp_fib_entry_params {
388 u32 tb_id;
389 u32 prio;
390 u8 tos;
391 u8 type;
392};
393
394struct mlxsw_sp_fib_entry {
395 struct list_head list;
396 struct mlxsw_sp_fib_node *fib_node;
61c503f9 397 enum mlxsw_sp_fib_entry_type type;
a7ff87ac
JP
398 struct list_head nexthop_group_node;
399 struct mlxsw_sp_nexthop_group *nh_group;
9aecce1c 400 struct mlxsw_sp_fib_entry_params params;
013b20f9 401 bool offloaded;
5e9c16cc
JP
402};
403
9011b677
IS
404enum mlxsw_sp_l3proto {
405 MLXSW_SP_L3_PROTO_IPV4,
406 MLXSW_SP_L3_PROTO_IPV6,
407};
408
409struct mlxsw_sp_lpm_tree {
410 u8 id; /* tree ID */
411 unsigned int ref_count;
412 enum mlxsw_sp_l3proto proto;
413 struct mlxsw_sp_prefix_usage prefix_usage;
414};
415
5e9c16cc
JP
416struct mlxsw_sp_fib {
417 struct rhashtable ht;
9aecce1c 418 struct list_head node_list;
76610ebb
IS
419 struct mlxsw_sp_vr *vr;
420 struct mlxsw_sp_lpm_tree *lpm_tree;
5e9c16cc
JP
421 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
422 struct mlxsw_sp_prefix_usage prefix_usage;
76610ebb 423 enum mlxsw_sp_l3proto proto;
5e9c16cc
JP
424};
425
9011b677
IS
426struct mlxsw_sp_vr {
427 u16 id; /* virtual router ID */
428 u32 tb_id; /* kernel fib table id */
429 unsigned int rif_count;
430 struct mlxsw_sp_fib *fib4;
431};
432
9aecce1c 433static const struct rhashtable_params mlxsw_sp_fib_ht_params;
5e9c16cc 434
76610ebb
IS
435static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
436 enum mlxsw_sp_l3proto proto)
5e9c16cc
JP
437{
438 struct mlxsw_sp_fib *fib;
439 int err;
440
441 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
442 if (!fib)
443 return ERR_PTR(-ENOMEM);
444 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
445 if (err)
446 goto err_rhashtable_init;
9aecce1c 447 INIT_LIST_HEAD(&fib->node_list);
76610ebb
IS
448 fib->proto = proto;
449 fib->vr = vr;
5e9c16cc
JP
450 return fib;
451
452err_rhashtable_init:
453 kfree(fib);
454 return ERR_PTR(err);
455}
456
457static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
458{
9aecce1c 459 WARN_ON(!list_empty(&fib->node_list));
76610ebb 460 WARN_ON(fib->lpm_tree);
5e9c16cc
JP
461 rhashtable_destroy(&fib->ht);
462 kfree(fib);
463}
464
53342023 465static struct mlxsw_sp_lpm_tree *
382dbb40 466mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
53342023
JP
467{
468 static struct mlxsw_sp_lpm_tree *lpm_tree;
469 int i;
470
9011b677
IS
471 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
472 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
382dbb40
IS
473 if (lpm_tree->ref_count == 0)
474 return lpm_tree;
53342023
JP
475 }
476 return NULL;
477}
478
479static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
480 struct mlxsw_sp_lpm_tree *lpm_tree)
481{
482 char ralta_pl[MLXSW_REG_RALTA_LEN];
483
1a9234e6
IS
484 mlxsw_reg_ralta_pack(ralta_pl, true,
485 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
486 lpm_tree->id);
53342023
JP
487 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
488}
489
490static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
491 struct mlxsw_sp_lpm_tree *lpm_tree)
492{
493 char ralta_pl[MLXSW_REG_RALTA_LEN];
494
1a9234e6
IS
495 mlxsw_reg_ralta_pack(ralta_pl, false,
496 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
497 lpm_tree->id);
53342023
JP
498 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
499}
500
501static int
502mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
503 struct mlxsw_sp_prefix_usage *prefix_usage,
504 struct mlxsw_sp_lpm_tree *lpm_tree)
505{
506 char ralst_pl[MLXSW_REG_RALST_LEN];
507 u8 root_bin = 0;
508 u8 prefix;
509 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
510
511 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
512 root_bin = prefix;
513
514 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
515 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
516 if (prefix == 0)
517 continue;
518 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
519 MLXSW_REG_RALST_BIN_NO_CHILD);
520 last_prefix = prefix;
521 }
522 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
523}
524
525static struct mlxsw_sp_lpm_tree *
526mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
527 struct mlxsw_sp_prefix_usage *prefix_usage,
382dbb40 528 enum mlxsw_sp_l3proto proto)
53342023
JP
529{
530 struct mlxsw_sp_lpm_tree *lpm_tree;
531 int err;
532
382dbb40 533 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
53342023
JP
534 if (!lpm_tree)
535 return ERR_PTR(-EBUSY);
536 lpm_tree->proto = proto;
537 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
538 if (err)
539 return ERR_PTR(err);
540
541 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
542 lpm_tree);
543 if (err)
544 goto err_left_struct_set;
2083d367
JP
545 memcpy(&lpm_tree->prefix_usage, prefix_usage,
546 sizeof(lpm_tree->prefix_usage));
53342023
JP
547 return lpm_tree;
548
549err_left_struct_set:
550 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
551 return ERR_PTR(err);
552}
553
554static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
555 struct mlxsw_sp_lpm_tree *lpm_tree)
556{
557 return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
558}
559
560static struct mlxsw_sp_lpm_tree *
561mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
562 struct mlxsw_sp_prefix_usage *prefix_usage,
382dbb40 563 enum mlxsw_sp_l3proto proto)
53342023
JP
564{
565 struct mlxsw_sp_lpm_tree *lpm_tree;
566 int i;
567
9011b677
IS
568 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
569 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
8b99becd
JP
570 if (lpm_tree->ref_count != 0 &&
571 lpm_tree->proto == proto &&
53342023
JP
572 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
573 prefix_usage))
574 goto inc_ref_count;
575 }
576 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
382dbb40 577 proto);
53342023
JP
578 if (IS_ERR(lpm_tree))
579 return lpm_tree;
580
581inc_ref_count:
582 lpm_tree->ref_count++;
583 return lpm_tree;
584}
585
586static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
587 struct mlxsw_sp_lpm_tree *lpm_tree)
588{
589 if (--lpm_tree->ref_count == 0)
590 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
591 return 0;
592}
593
d7a60306 594#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
8494ab06
IS
595
596static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
53342023
JP
597{
598 struct mlxsw_sp_lpm_tree *lpm_tree;
8494ab06 599 u64 max_trees;
53342023
JP
600 int i;
601
8494ab06
IS
602 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
603 return -EIO;
604
605 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
9011b677
IS
606 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
607 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
8494ab06
IS
608 sizeof(struct mlxsw_sp_lpm_tree),
609 GFP_KERNEL);
9011b677 610 if (!mlxsw_sp->router->lpm.trees)
8494ab06
IS
611 return -ENOMEM;
612
9011b677
IS
613 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
614 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
53342023
JP
615 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
616 }
8494ab06
IS
617
618 return 0;
619}
620
621static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
622{
9011b677 623 kfree(mlxsw_sp->router->lpm.trees);
53342023
JP
624}
625
76610ebb
IS
626static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
627{
628 return !!vr->fib4;
629}
630
6b75c480
JP
631static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
632{
633 struct mlxsw_sp_vr *vr;
634 int i;
635
c1a38311 636 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 637 vr = &mlxsw_sp->router->vrs[i];
76610ebb 638 if (!mlxsw_sp_vr_is_used(vr))
6b75c480
JP
639 return vr;
640 }
641 return NULL;
642}
643
644static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
76610ebb 645 const struct mlxsw_sp_fib *fib)
6b75c480
JP
646{
647 char raltb_pl[MLXSW_REG_RALTB_LEN];
648
76610ebb
IS
649 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
650 (enum mlxsw_reg_ralxx_protocol) fib->proto,
651 fib->lpm_tree->id);
6b75c480
JP
652 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
653}
654
655static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
76610ebb 656 const struct mlxsw_sp_fib *fib)
6b75c480
JP
657{
658 char raltb_pl[MLXSW_REG_RALTB_LEN];
659
660 /* Bind to tree 0 which is default */
76610ebb
IS
661 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
662 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
6b75c480
JP
663 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
664}
665
666static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
667{
668 /* For our purpose, squash main and local table into one */
669 if (tb_id == RT_TABLE_LOCAL)
670 tb_id = RT_TABLE_MAIN;
671 return tb_id;
672}
673
674static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
76610ebb 675 u32 tb_id)
6b75c480
JP
676{
677 struct mlxsw_sp_vr *vr;
678 int i;
679
680 tb_id = mlxsw_sp_fix_tb_id(tb_id);
9497c042 681
c1a38311 682 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 683 vr = &mlxsw_sp->router->vrs[i];
76610ebb 684 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
6b75c480
JP
685 return vr;
686 }
687 return NULL;
688}
689
76610ebb
IS
690static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
691 enum mlxsw_sp_l3proto proto)
692{
693 switch (proto) {
694 case MLXSW_SP_L3_PROTO_IPV4:
695 return vr->fib4;
696 case MLXSW_SP_L3_PROTO_IPV6:
697 BUG_ON(1);
698 }
699 return NULL;
700}
701
6b75c480 702static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
76610ebb 703 u32 tb_id)
6b75c480 704{
6b75c480 705 struct mlxsw_sp_vr *vr;
6b75c480
JP
706
707 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
708 if (!vr)
709 return ERR_PTR(-EBUSY);
76610ebb
IS
710 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
711 if (IS_ERR(vr->fib4))
712 return ERR_CAST(vr->fib4);
6b75c480 713 vr->tb_id = tb_id;
6b75c480 714 return vr;
6b75c480
JP
715}
716
76610ebb 717static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
6b75c480 718{
76610ebb
IS
719 mlxsw_sp_fib_destroy(vr->fib4);
720 vr->fib4 = NULL;
6b75c480
JP
721}
722
723static int
76610ebb 724mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
6b75c480
JP
725 struct mlxsw_sp_prefix_usage *req_prefix_usage)
726{
76610ebb 727 struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
f7df4923
IS
728 struct mlxsw_sp_lpm_tree *new_tree;
729 int err;
6b75c480 730
f7df4923 731 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
6b75c480
JP
732 return 0;
733
f7df4923 734 new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
76610ebb 735 fib->proto);
f7df4923 736 if (IS_ERR(new_tree)) {
6b75c480
JP
737 /* We failed to get a tree according to the required
738 * prefix usage. However, the current tree might be still good
739 * for us if our requirement is subset of the prefixes used
740 * in the tree.
741 */
742 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
f7df4923 743 &lpm_tree->prefix_usage))
6b75c480 744 return 0;
f7df4923 745 return PTR_ERR(new_tree);
6b75c480
JP
746 }
747
f7df4923 748 /* Prevent packet loss by overwriting existing binding */
76610ebb
IS
749 fib->lpm_tree = new_tree;
750 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
f7df4923
IS
751 if (err)
752 goto err_tree_bind;
753 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
754
755 return 0;
756
757err_tree_bind:
76610ebb 758 fib->lpm_tree = lpm_tree;
f7df4923
IS
759 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
760 return err;
6b75c480
JP
761}
762
76610ebb 763static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
6b75c480
JP
764{
765 struct mlxsw_sp_vr *vr;
6b75c480
JP
766
767 tb_id = mlxsw_sp_fix_tb_id(tb_id);
76610ebb
IS
768 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
769 if (!vr)
770 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
6b75c480
JP
771 return vr;
772}
773
76610ebb 774static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
6b75c480 775{
6913229e 776 if (!vr->rif_count && list_empty(&vr->fib4->node_list))
76610ebb 777 mlxsw_sp_vr_destroy(vr);
6b75c480
JP
778}
779
9497c042 780static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
6b75c480
JP
781{
782 struct mlxsw_sp_vr *vr;
c1a38311 783 u64 max_vrs;
6b75c480
JP
784 int i;
785
c1a38311 786 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
9497c042
NF
787 return -EIO;
788
c1a38311 789 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
9011b677
IS
790 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
791 GFP_KERNEL);
792 if (!mlxsw_sp->router->vrs)
9497c042
NF
793 return -ENOMEM;
794
c1a38311 795 for (i = 0; i < max_vrs; i++) {
9011b677 796 vr = &mlxsw_sp->router->vrs[i];
6b75c480
JP
797 vr->id = i;
798 }
9497c042
NF
799
800 return 0;
801}
802
ac571de9
IS
803static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
804
9497c042
NF
805static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
806{
3057224e
IS
807 /* At this stage we're guaranteed not to have new incoming
808 * FIB notifications and the work queue is free from FIBs
809 * sitting on top of mlxsw netdevs. However, we can still
810 * have other FIBs queued. Flush the queue before flushing
811 * the device's tables. No need for locks, as we're the only
812 * writer.
813 */
814 mlxsw_core_flush_owq();
ac571de9 815 mlxsw_sp_router_fib_flush(mlxsw_sp);
9011b677 816 kfree(mlxsw_sp->router->vrs);
6b75c480
JP
817}
818
6cf3c971 819struct mlxsw_sp_neigh_key {
33b1341c 820 struct neighbour *n;
6cf3c971
JP
821};
822
823struct mlxsw_sp_neigh_entry {
9665b745 824 struct list_head rif_list_node;
6cf3c971
JP
825 struct rhash_head ht_node;
826 struct mlxsw_sp_neigh_key key;
827 u16 rif;
5c8802f1 828 bool connected;
a6bf9e93 829 unsigned char ha[ETH_ALEN];
a7ff87ac
JP
830 struct list_head nexthop_list; /* list of nexthops using
831 * this neigh entry
832 */
b2157149 833 struct list_head nexthop_neighs_list_node;
6cf3c971
JP
834};
835
836static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
837 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
838 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
839 .key_len = sizeof(struct mlxsw_sp_neigh_key),
840};
841
6cf3c971 842static struct mlxsw_sp_neigh_entry *
5c8802f1
IS
843mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
844 u16 rif)
6cf3c971
JP
845{
846 struct mlxsw_sp_neigh_entry *neigh_entry;
847
5c8802f1 848 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
6cf3c971
JP
849 if (!neigh_entry)
850 return NULL;
5c8802f1 851
33b1341c 852 neigh_entry->key.n = n;
6cf3c971 853 neigh_entry->rif = rif;
a7ff87ac 854 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
5c8802f1 855
6cf3c971
JP
856 return neigh_entry;
857}
858
5c8802f1 859static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
6cf3c971
JP
860{
861 kfree(neigh_entry);
862}
863
5c8802f1
IS
864static int
865mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
866 struct mlxsw_sp_neigh_entry *neigh_entry)
6cf3c971 867{
9011b677 868 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
5c8802f1
IS
869 &neigh_entry->ht_node,
870 mlxsw_sp_neigh_ht_params);
871}
6cf3c971 872
5c8802f1
IS
873static void
874mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
875 struct mlxsw_sp_neigh_entry *neigh_entry)
876{
9011b677 877 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
5c8802f1
IS
878 &neigh_entry->ht_node,
879 mlxsw_sp_neigh_ht_params);
6cf3c971
JP
880}
881
5c8802f1
IS
882static struct mlxsw_sp_neigh_entry *
883mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
6cf3c971 884{
6cf3c971 885 struct mlxsw_sp_neigh_entry *neigh_entry;
bf95233e 886 struct mlxsw_sp_rif *rif;
6cf3c971
JP
887 int err;
888
bf95233e
AS
889 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
890 if (!rif)
5c8802f1 891 return ERR_PTR(-EINVAL);
6cf3c971 892
bf95233e 893 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
6cf3c971 894 if (!neigh_entry)
5c8802f1
IS
895 return ERR_PTR(-ENOMEM);
896
6cf3c971
JP
897 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
898 if (err)
899 goto err_neigh_entry_insert;
5c8802f1 900
bf95233e 901 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
9665b745 902
5c8802f1 903 return neigh_entry;
6cf3c971
JP
904
905err_neigh_entry_insert:
5c8802f1
IS
906 mlxsw_sp_neigh_entry_free(neigh_entry);
907 return ERR_PTR(err);
6cf3c971
JP
908}
909
5c8802f1
IS
910static void
911mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
912 struct mlxsw_sp_neigh_entry *neigh_entry)
6cf3c971 913{
9665b745 914 list_del(&neigh_entry->rif_list_node);
5c8802f1
IS
915 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
916 mlxsw_sp_neigh_entry_free(neigh_entry);
917}
6cf3c971 918
5c8802f1
IS
919static struct mlxsw_sp_neigh_entry *
920mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
921{
922 struct mlxsw_sp_neigh_key key;
6cf3c971 923
5c8802f1 924 key.n = n;
9011b677 925 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
5c8802f1 926 &key, mlxsw_sp_neigh_ht_params);
6cf3c971
JP
927}
928
c723c735
YG
929static void
930mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
931{
932 unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
933
9011b677 934 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
c723c735
YG
935}
936
937static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
938 char *rauhtd_pl,
939 int ent_index)
940{
941 struct net_device *dev;
942 struct neighbour *n;
943 __be32 dipn;
944 u32 dip;
945 u16 rif;
946
947 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
948
5f9efffb 949 if (!mlxsw_sp->router->rifs[rif]) {
c723c735
YG
950 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
951 return;
952 }
953
954 dipn = htonl(dip);
5f9efffb 955 dev = mlxsw_sp->router->rifs[rif]->dev;
c723c735
YG
956 n = neigh_lookup(&arp_tbl, &dipn, dev);
957 if (!n) {
958 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
959 &dip);
960 return;
961 }
962
963 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
964 neigh_event_send(n, NULL);
965 neigh_release(n);
966}
967
968static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
969 char *rauhtd_pl,
970 int rec_index)
971{
972 u8 num_entries;
973 int i;
974
975 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
976 rec_index);
977 /* Hardware starts counting at 0, so add 1. */
978 num_entries++;
979
980 /* Each record consists of several neighbour entries. */
981 for (i = 0; i < num_entries; i++) {
982 int ent_index;
983
984 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
985 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
986 ent_index);
987 }
988
989}
990
991static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
992 char *rauhtd_pl, int rec_index)
993{
994 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
995 case MLXSW_REG_RAUHTD_TYPE_IPV4:
996 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
997 rec_index);
998 break;
999 case MLXSW_REG_RAUHTD_TYPE_IPV6:
1000 WARN_ON_ONCE(1);
1001 break;
1002 }
1003}
1004
42cdb338
AS
1005static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1006{
1007 u8 num_rec, last_rec_index, num_entries;
1008
1009 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1010 last_rec_index = num_rec - 1;
1011
1012 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1013 return false;
1014 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1015 MLXSW_REG_RAUHTD_TYPE_IPV6)
1016 return true;
1017
1018 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1019 last_rec_index);
1020 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1021 return true;
1022 return false;
1023}
1024
b2157149 1025static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
c723c735 1026{
c723c735
YG
1027 char *rauhtd_pl;
1028 u8 num_rec;
1029 int i, err;
1030
1031 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1032 if (!rauhtd_pl)
b2157149 1033 return -ENOMEM;
c723c735
YG
1034
1035 /* Make sure the neighbour's netdev isn't removed in the
1036 * process.
1037 */
1038 rtnl_lock();
1039 do {
1040 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
1041 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1042 rauhtd_pl);
1043 if (err) {
1044 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
1045 break;
1046 }
1047 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1048 for (i = 0; i < num_rec; i++)
1049 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1050 i);
42cdb338 1051 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
c723c735
YG
1052 rtnl_unlock();
1053
1054 kfree(rauhtd_pl);
b2157149
YG
1055 return err;
1056}
1057
1058static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1059{
1060 struct mlxsw_sp_neigh_entry *neigh_entry;
1061
1062 /* Take RTNL mutex here to prevent lists from changes */
1063 rtnl_lock();
9011b677 1064 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
8a0b7275 1065 nexthop_neighs_list_node)
b2157149
YG
1066 /* If this neigh have nexthops, make the kernel think this neigh
1067 * is active regardless of the traffic.
1068 */
8a0b7275 1069 neigh_event_send(neigh_entry->key.n, NULL);
b2157149
YG
1070 rtnl_unlock();
1071}
1072
1073static void
1074mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1075{
9011b677 1076 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
b2157149 1077
9011b677 1078 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
b2157149
YG
1079 msecs_to_jiffies(interval));
1080}
1081
1082static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1083{
9011b677 1084 struct mlxsw_sp_router *router;
b2157149
YG
1085 int err;
1086
9011b677
IS
1087 router = container_of(work, struct mlxsw_sp_router,
1088 neighs_update.dw.work);
1089 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
b2157149 1090 if (err)
9011b677 1091 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
b2157149 1092
9011b677 1093 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
b2157149 1094
9011b677 1095 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
c723c735
YG
1096}
1097
0b2361d9
YG
1098static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1099{
1100 struct mlxsw_sp_neigh_entry *neigh_entry;
9011b677 1101 struct mlxsw_sp_router *router;
0b2361d9 1102
9011b677
IS
1103 router = container_of(work, struct mlxsw_sp_router,
1104 nexthop_probe_dw.work);
0b2361d9
YG
1105 /* Iterate over nexthop neighbours, find those who are unresolved and
1106 * send arp on them. This solves the chicken-egg problem when
1107 * the nexthop wouldn't get offloaded until the neighbor is resolved
1108 * but it wouldn't get resolved ever in case traffic is flowing in HW
1109 * using different nexthop.
1110 *
1111 * Take RTNL mutex here to prevent lists from changes.
1112 */
1113 rtnl_lock();
9011b677 1114 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
8a0b7275 1115 nexthop_neighs_list_node)
01b1aa35 1116 if (!neigh_entry->connected)
33b1341c 1117 neigh_event_send(neigh_entry->key.n, NULL);
0b2361d9
YG
1118 rtnl_unlock();
1119
9011b677 1120 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
0b2361d9
YG
1121 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1122}
1123
a7ff87ac
JP
1124static void
1125mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1126 struct mlxsw_sp_neigh_entry *neigh_entry,
1127 bool removing);
1128
5c8802f1
IS
1129static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
1130{
1131 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1132 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1133}
1134
1135static void
1136mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1137 struct mlxsw_sp_neigh_entry *neigh_entry,
1138 enum mlxsw_reg_rauht_op op)
a6bf9e93 1139{
33b1341c 1140 struct neighbour *n = neigh_entry->key.n;
5c8802f1 1141 u32 dip = ntohl(*((__be32 *) n->primary_key));
a6bf9e93 1142 char rauht_pl[MLXSW_REG_RAUHT_LEN];
5c8802f1
IS
1143
1144 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1145 dip);
1146 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1147}
1148
1149static void
1150mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1151 struct mlxsw_sp_neigh_entry *neigh_entry,
1152 bool adding)
1153{
1154 if (!adding && !neigh_entry->connected)
1155 return;
1156 neigh_entry->connected = adding;
1157 if (neigh_entry->key.n->tbl == &arp_tbl)
1158 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
1159 mlxsw_sp_rauht_op(adding));
1160 else
1161 WARN_ON_ONCE(1);
1162}
1163
1164struct mlxsw_sp_neigh_event_work {
1165 struct work_struct work;
1166 struct mlxsw_sp *mlxsw_sp;
1167 struct neighbour *n;
1168};
1169
1170static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
1171{
1172 struct mlxsw_sp_neigh_event_work *neigh_work =
1173 container_of(work, struct mlxsw_sp_neigh_event_work, work);
1174 struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
1175 struct mlxsw_sp_neigh_entry *neigh_entry;
1176 struct neighbour *n = neigh_work->n;
1177 unsigned char ha[ETH_ALEN];
a6bf9e93 1178 bool entry_connected;
93a87e5e 1179 u8 nud_state, dead;
a6bf9e93 1180
5c8802f1
IS
1181 /* If these parameters are changed after we release the lock,
1182 * then we are guaranteed to receive another event letting us
1183 * know about it.
1184 */
a6bf9e93 1185 read_lock_bh(&n->lock);
5c8802f1 1186 memcpy(ha, n->ha, ETH_ALEN);
a6bf9e93 1187 nud_state = n->nud_state;
93a87e5e 1188 dead = n->dead;
a6bf9e93
YG
1189 read_unlock_bh(&n->lock);
1190
5c8802f1 1191 rtnl_lock();
93a87e5e 1192 entry_connected = nud_state & NUD_VALID && !dead;
5c8802f1
IS
1193 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1194 if (!entry_connected && !neigh_entry)
1195 goto out;
1196 if (!neigh_entry) {
1197 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1198 if (IS_ERR(neigh_entry))
1199 goto out;
a6bf9e93
YG
1200 }
1201
5c8802f1
IS
1202 memcpy(neigh_entry->ha, ha, ETH_ALEN);
1203 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
1204 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
1205
1206 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1207 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1208
1209out:
1210 rtnl_unlock();
a6bf9e93 1211 neigh_release(n);
5c8802f1 1212 kfree(neigh_work);
a6bf9e93
YG
1213}
1214
e7322638
JP
1215int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
1216 unsigned long event, void *ptr)
c723c735 1217{
5c8802f1 1218 struct mlxsw_sp_neigh_event_work *neigh_work;
c723c735
YG
1219 struct mlxsw_sp_port *mlxsw_sp_port;
1220 struct mlxsw_sp *mlxsw_sp;
1221 unsigned long interval;
1222 struct neigh_parms *p;
a6bf9e93 1223 struct neighbour *n;
c723c735
YG
1224
1225 switch (event) {
1226 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
1227 p = ptr;
1228
1229 /* We don't care about changes in the default table. */
1230 if (!p->dev || p->tbl != &arp_tbl)
1231 return NOTIFY_DONE;
1232
1233 /* We are in atomic context and can't take RTNL mutex,
1234 * so use RCU variant to walk the device chain.
1235 */
1236 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
1237 if (!mlxsw_sp_port)
1238 return NOTIFY_DONE;
1239
1240 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1241 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
9011b677 1242 mlxsw_sp->router->neighs_update.interval = interval;
c723c735
YG
1243
1244 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1245 break;
a6bf9e93
YG
1246 case NETEVENT_NEIGH_UPDATE:
1247 n = ptr;
a6bf9e93
YG
1248
1249 if (n->tbl != &arp_tbl)
1250 return NOTIFY_DONE;
1251
5c8802f1 1252 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
a6bf9e93
YG
1253 if (!mlxsw_sp_port)
1254 return NOTIFY_DONE;
1255
5c8802f1
IS
1256 neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
1257 if (!neigh_work) {
a6bf9e93 1258 mlxsw_sp_port_dev_put(mlxsw_sp_port);
5c8802f1 1259 return NOTIFY_BAD;
a6bf9e93 1260 }
5c8802f1
IS
1261
1262 INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
1263 neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1264 neigh_work->n = n;
a6bf9e93
YG
1265
1266 /* Take a reference to ensure the neighbour won't be
1267 * destructed until we drop the reference in delayed
1268 * work.
1269 */
1270 neigh_clone(n);
5c8802f1
IS
1271 mlxsw_core_schedule_work(&neigh_work->work);
1272 mlxsw_sp_port_dev_put(mlxsw_sp_port);
a6bf9e93 1273 break;
c723c735
YG
1274 }
1275
1276 return NOTIFY_DONE;
1277}
1278
6cf3c971
JP
1279static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1280{
c723c735
YG
1281 int err;
1282
9011b677 1283 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
c723c735
YG
1284 &mlxsw_sp_neigh_ht_params);
1285 if (err)
1286 return err;
1287
1288 /* Initialize the polling interval according to the default
1289 * table.
1290 */
1291 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1292
0b2361d9 1293 /* Create the delayed works for the activity_update */
9011b677 1294 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
c723c735 1295 mlxsw_sp_router_neighs_update_work);
9011b677 1296 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
0b2361d9 1297 mlxsw_sp_router_probe_unresolved_nexthops);
9011b677
IS
1298 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
1299 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
c723c735 1300 return 0;
6cf3c971
JP
1301}
1302
1303static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1304{
9011b677
IS
1305 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
1306 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
1307 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
6cf3c971
JP
1308}
1309
9665b745 1310static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
bf95233e 1311 const struct mlxsw_sp_rif *rif)
9665b745
IS
1312{
1313 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1314
1315 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
bf95233e 1316 rif->rif_index, rif->addr);
9665b745
IS
1317 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1318}
1319
1320static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
bf95233e 1321 struct mlxsw_sp_rif *rif)
9665b745
IS
1322{
1323 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
1324
bf95233e
AS
1325 mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
1326 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
9665b745
IS
1327 rif_list_node)
1328 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1329}
1330
c53b8e1b
IS
1331struct mlxsw_sp_nexthop_key {
1332 struct fib_nh *fib_nh;
1333};
1334
a7ff87ac
JP
1335struct mlxsw_sp_nexthop {
1336 struct list_head neigh_list_node; /* member of neigh entry list */
9665b745 1337 struct list_head rif_list_node;
a7ff87ac
JP
1338 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1339 * this belongs to
1340 */
c53b8e1b
IS
1341 struct rhash_head ht_node;
1342 struct mlxsw_sp_nexthop_key key;
bf95233e 1343 struct mlxsw_sp_rif *rif;
a7ff87ac
JP
1344 u8 should_offload:1, /* set indicates this neigh is connected and
1345 * should be put to KVD linear area of this group.
1346 */
1347 offloaded:1, /* set in case the neigh is actually put into
1348 * KVD linear area of this group.
1349 */
1350 update:1; /* set indicates that MAC of this neigh should be
1351 * updated in HW
1352 */
1353 struct mlxsw_sp_neigh_entry *neigh_entry;
1354};
1355
e9ad5e7d
IS
1356struct mlxsw_sp_nexthop_group_key {
1357 struct fib_info *fi;
1358};
1359
a7ff87ac 1360struct mlxsw_sp_nexthop_group {
e9ad5e7d 1361 struct rhash_head ht_node;
a7ff87ac 1362 struct list_head fib_list; /* list of fib entries that use this group */
e9ad5e7d 1363 struct mlxsw_sp_nexthop_group_key key;
b3e8d1eb
IS
1364 u8 adj_index_valid:1,
1365 gateway:1; /* routes using the group use a gateway */
a7ff87ac
JP
1366 u32 adj_index;
1367 u16 ecmp_size;
1368 u16 count;
1369 struct mlxsw_sp_nexthop nexthops[0];
bf95233e 1370#define nh_rif nexthops[0].rif
a7ff87ac
JP
1371};
1372
e9ad5e7d
IS
1373static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
1374 .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
1375 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
1376 .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
1377};
1378
1379static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
1380 struct mlxsw_sp_nexthop_group *nh_grp)
1381{
9011b677 1382 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
e9ad5e7d
IS
1383 &nh_grp->ht_node,
1384 mlxsw_sp_nexthop_group_ht_params);
1385}
1386
1387static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
1388 struct mlxsw_sp_nexthop_group *nh_grp)
1389{
9011b677 1390 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
e9ad5e7d
IS
1391 &nh_grp->ht_node,
1392 mlxsw_sp_nexthop_group_ht_params);
1393}
1394
1395static struct mlxsw_sp_nexthop_group *
1396mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
1397 struct mlxsw_sp_nexthop_group_key key)
1398{
9011b677 1399 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
e9ad5e7d
IS
1400 mlxsw_sp_nexthop_group_ht_params);
1401}
1402
c53b8e1b
IS
1403static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
1404 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
1405 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
1406 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
1407};
1408
1409static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
1410 struct mlxsw_sp_nexthop *nh)
1411{
9011b677 1412 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
c53b8e1b
IS
1413 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
1414}
1415
1416static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
1417 struct mlxsw_sp_nexthop *nh)
1418{
9011b677 1419 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
c53b8e1b
IS
1420 mlxsw_sp_nexthop_ht_params);
1421}
1422
ad178c8e
IS
1423static struct mlxsw_sp_nexthop *
1424mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1425 struct mlxsw_sp_nexthop_key key)
1426{
9011b677 1427 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
ad178c8e
IS
1428 mlxsw_sp_nexthop_ht_params);
1429}
1430
a7ff87ac 1431static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
76610ebb 1432 const struct mlxsw_sp_fib *fib,
a7ff87ac
JP
1433 u32 adj_index, u16 ecmp_size,
1434 u32 new_adj_index,
1435 u16 new_ecmp_size)
1436{
1437 char raleu_pl[MLXSW_REG_RALEU_LEN];
1438
1a9234e6 1439 mlxsw_reg_raleu_pack(raleu_pl,
76610ebb
IS
1440 (enum mlxsw_reg_ralxx_protocol) fib->proto,
1441 fib->vr->id, adj_index, ecmp_size, new_adj_index,
1a9234e6 1442 new_ecmp_size);
a7ff87ac
JP
1443 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1444}
1445
1446static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1447 struct mlxsw_sp_nexthop_group *nh_grp,
1448 u32 old_adj_index, u16 old_ecmp_size)
1449{
1450 struct mlxsw_sp_fib_entry *fib_entry;
76610ebb 1451 struct mlxsw_sp_fib *fib = NULL;
a7ff87ac
JP
1452 int err;
1453
1454 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
76610ebb 1455 if (fib == fib_entry->fib_node->fib)
a7ff87ac 1456 continue;
76610ebb
IS
1457 fib = fib_entry->fib_node->fib;
1458 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
a7ff87ac
JP
1459 old_adj_index,
1460 old_ecmp_size,
1461 nh_grp->adj_index,
1462 nh_grp->ecmp_size);
1463 if (err)
1464 return err;
1465 }
1466 return 0;
1467}
1468
1469static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1470 struct mlxsw_sp_nexthop *nh)
1471{
1472 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1473 char ratr_pl[MLXSW_REG_RATR_LEN];
1474
1475 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1476 true, adj_index, neigh_entry->rif);
1477 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1478 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1479}
1480
1481static int
1482mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
a59b7e02
IS
1483 struct mlxsw_sp_nexthop_group *nh_grp,
1484 bool reallocate)
a7ff87ac
JP
1485{
1486 u32 adj_index = nh_grp->adj_index; /* base */
1487 struct mlxsw_sp_nexthop *nh;
1488 int i;
1489 int err;
1490
1491 for (i = 0; i < nh_grp->count; i++) {
1492 nh = &nh_grp->nexthops[i];
1493
1494 if (!nh->should_offload) {
1495 nh->offloaded = 0;
1496 continue;
1497 }
1498
a59b7e02 1499 if (nh->update || reallocate) {
a7ff87ac
JP
1500 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1501 adj_index, nh);
1502 if (err)
1503 return err;
1504 nh->update = 0;
1505 nh->offloaded = 1;
1506 }
1507 adj_index++;
1508 }
1509 return 0;
1510}
1511
1512static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1513 struct mlxsw_sp_fib_entry *fib_entry);
1514
1515static int
1516mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1517 struct mlxsw_sp_nexthop_group *nh_grp)
1518{
1519 struct mlxsw_sp_fib_entry *fib_entry;
1520 int err;
1521
1522 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1523 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1524 if (err)
1525 return err;
1526 }
1527 return 0;
1528}
1529
1530static void
1531mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1532 struct mlxsw_sp_nexthop_group *nh_grp)
1533{
1534 struct mlxsw_sp_nexthop *nh;
1535 bool offload_change = false;
1536 u32 adj_index;
1537 u16 ecmp_size = 0;
1538 bool old_adj_index_valid;
1539 u32 old_adj_index;
1540 u16 old_ecmp_size;
a7ff87ac
JP
1541 int i;
1542 int err;
1543
b3e8d1eb
IS
1544 if (!nh_grp->gateway) {
1545 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1546 return;
1547 }
1548
a7ff87ac
JP
1549 for (i = 0; i < nh_grp->count; i++) {
1550 nh = &nh_grp->nexthops[i];
1551
1552 if (nh->should_offload ^ nh->offloaded) {
1553 offload_change = true;
1554 if (nh->should_offload)
1555 nh->update = 1;
1556 }
1557 if (nh->should_offload)
1558 ecmp_size++;
1559 }
1560 if (!offload_change) {
1561 /* Nothing was added or removed, so no need to reallocate. Just
1562 * update MAC on existing adjacency indexes.
1563 */
a59b7e02
IS
1564 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1565 false);
a7ff87ac
JP
1566 if (err) {
1567 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1568 goto set_trap;
1569 }
1570 return;
1571 }
1572 if (!ecmp_size)
1573 /* No neigh of this group is connected so we just set
1574 * the trap and let everthing flow through kernel.
1575 */
1576 goto set_trap;
1577
13124443
AS
1578 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
1579 if (err) {
a7ff87ac
JP
1580 /* We ran out of KVD linear space, just set the
1581 * trap and let everything flow through kernel.
1582 */
1583 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1584 goto set_trap;
1585 }
a7ff87ac
JP
1586 old_adj_index_valid = nh_grp->adj_index_valid;
1587 old_adj_index = nh_grp->adj_index;
1588 old_ecmp_size = nh_grp->ecmp_size;
1589 nh_grp->adj_index_valid = 1;
1590 nh_grp->adj_index = adj_index;
1591 nh_grp->ecmp_size = ecmp_size;
a59b7e02 1592 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
a7ff87ac
JP
1593 if (err) {
1594 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1595 goto set_trap;
1596 }
1597
1598 if (!old_adj_index_valid) {
1599 /* The trap was set for fib entries, so we have to call
1600 * fib entry update to unset it and use adjacency index.
1601 */
1602 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1603 if (err) {
1604 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1605 goto set_trap;
1606 }
1607 return;
1608 }
1609
1610 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1611 old_adj_index, old_ecmp_size);
1612 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1613 if (err) {
1614 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1615 goto set_trap;
1616 }
1617 return;
1618
1619set_trap:
1620 old_adj_index_valid = nh_grp->adj_index_valid;
1621 nh_grp->adj_index_valid = 0;
1622 for (i = 0; i < nh_grp->count; i++) {
1623 nh = &nh_grp->nexthops[i];
1624 nh->offloaded = 0;
1625 }
1626 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1627 if (err)
1628 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1629 if (old_adj_index_valid)
1630 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1631}
1632
1633static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1634 bool removing)
1635{
1636 if (!removing && !nh->should_offload)
1637 nh->should_offload = 1;
1638 else if (removing && nh->offloaded)
1639 nh->should_offload = 0;
1640 nh->update = 1;
1641}
1642
1643static void
1644mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1645 struct mlxsw_sp_neigh_entry *neigh_entry,
1646 bool removing)
1647{
1648 struct mlxsw_sp_nexthop *nh;
1649
a7ff87ac
JP
1650 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1651 neigh_list_node) {
1652 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1653 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1654 }
a7ff87ac
JP
1655}
1656
9665b745 1657static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
bf95233e 1658 struct mlxsw_sp_rif *rif)
9665b745 1659{
bf95233e 1660 if (nh->rif)
9665b745
IS
1661 return;
1662
bf95233e
AS
1663 nh->rif = rif;
1664 list_add(&nh->rif_list_node, &rif->nexthop_list);
9665b745
IS
1665}
1666
1667static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1668{
bf95233e 1669 if (!nh->rif)
9665b745
IS
1670 return;
1671
1672 list_del(&nh->rif_list_node);
bf95233e 1673 nh->rif = NULL;
9665b745
IS
1674}
1675
a8c97014
IS
1676static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
1677 struct mlxsw_sp_nexthop *nh)
a7ff87ac
JP
1678{
1679 struct mlxsw_sp_neigh_entry *neigh_entry;
a8c97014 1680 struct fib_nh *fib_nh = nh->key.fib_nh;
a7ff87ac 1681 struct neighbour *n;
93a87e5e 1682 u8 nud_state, dead;
c53b8e1b
IS
1683 int err;
1684
ad178c8e 1685 if (!nh->nh_grp->gateway || nh->neigh_entry)
b8399a1e
IS
1686 return 0;
1687
33b1341c
JP
1688 /* Take a reference of neigh here ensuring that neigh would
1689 * not be detructed before the nexthop entry is finished.
1690 * The reference is taken either in neigh_lookup() or
fd76d910 1691 * in neigh_create() in case n is not found.
33b1341c 1692 */
a8c97014 1693 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
33b1341c 1694 if (!n) {
a8c97014
IS
1695 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
1696 if (IS_ERR(n))
1697 return PTR_ERR(n);
a7ff87ac 1698 neigh_event_send(n, NULL);
33b1341c
JP
1699 }
1700 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1701 if (!neigh_entry) {
5c8802f1
IS
1702 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1703 if (IS_ERR(neigh_entry)) {
c53b8e1b
IS
1704 err = -EINVAL;
1705 goto err_neigh_entry_create;
5c8802f1 1706 }
a7ff87ac 1707 }
b2157149
YG
1708
1709 /* If that is the first nexthop connected to that neigh, add to
1710 * nexthop_neighs_list
1711 */
1712 if (list_empty(&neigh_entry->nexthop_list))
1713 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
9011b677 1714 &mlxsw_sp->router->nexthop_neighs_list);
b2157149 1715
a7ff87ac
JP
1716 nh->neigh_entry = neigh_entry;
1717 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1718 read_lock_bh(&n->lock);
1719 nud_state = n->nud_state;
93a87e5e 1720 dead = n->dead;
a7ff87ac 1721 read_unlock_bh(&n->lock);
93a87e5e 1722 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
a7ff87ac
JP
1723
1724 return 0;
c53b8e1b
IS
1725
1726err_neigh_entry_create:
1727 neigh_release(n);
c53b8e1b 1728 return err;
a7ff87ac
JP
1729}
1730
a8c97014
IS
1731static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
1732 struct mlxsw_sp_nexthop *nh)
a7ff87ac
JP
1733{
1734 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
a8c97014 1735 struct neighbour *n;
a7ff87ac 1736
b8399a1e 1737 if (!neigh_entry)
a8c97014
IS
1738 return;
1739 n = neigh_entry->key.n;
b8399a1e 1740
58312125 1741 __mlxsw_sp_nexthop_neigh_update(nh, true);
a7ff87ac 1742 list_del(&nh->neigh_list_node);
e58be79e 1743 nh->neigh_entry = NULL;
b2157149
YG
1744
1745 /* If that is the last nexthop connected to that neigh, remove from
1746 * nexthop_neighs_list
1747 */
e58be79e
IS
1748 if (list_empty(&neigh_entry->nexthop_list))
1749 list_del(&neigh_entry->nexthop_neighs_list_node);
b2157149 1750
5c8802f1
IS
1751 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1752 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1753
1754 neigh_release(n);
a8c97014 1755}
c53b8e1b 1756
a8c97014
IS
1757static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1758 struct mlxsw_sp_nexthop_group *nh_grp,
1759 struct mlxsw_sp_nexthop *nh,
1760 struct fib_nh *fib_nh)
1761{
1762 struct net_device *dev = fib_nh->nh_dev;
df6dd79b 1763 struct in_device *in_dev;
bf95233e 1764 struct mlxsw_sp_rif *rif;
a8c97014
IS
1765 int err;
1766
1767 nh->nh_grp = nh_grp;
1768 nh->key.fib_nh = fib_nh;
1769 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1770 if (err)
1771 return err;
1772
97989ee0
IS
1773 if (!dev)
1774 return 0;
1775
df6dd79b
IS
1776 in_dev = __in_dev_get_rtnl(dev);
1777 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1778 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1779 return 0;
1780
bf95233e
AS
1781 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1782 if (!rif)
a8c97014 1783 return 0;
bf95233e 1784 mlxsw_sp_nexthop_rif_init(nh, rif);
a8c97014
IS
1785
1786 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1787 if (err)
1788 goto err_nexthop_neigh_init;
1789
1790 return 0;
1791
1792err_nexthop_neigh_init:
a4e75b76 1793 mlxsw_sp_nexthop_rif_fini(nh);
a8c97014
IS
1794 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1795 return err;
1796}
1797
1798static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1799 struct mlxsw_sp_nexthop *nh)
1800{
1801 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
9665b745 1802 mlxsw_sp_nexthop_rif_fini(nh);
c53b8e1b 1803 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
a7ff87ac
JP
1804}
1805
ad178c8e
IS
1806static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1807 unsigned long event, struct fib_nh *fib_nh)
1808{
1809 struct mlxsw_sp_nexthop_key key;
1810 struct mlxsw_sp_nexthop *nh;
bf95233e 1811 struct mlxsw_sp_rif *rif;
ad178c8e 1812
9011b677 1813 if (mlxsw_sp->router->aborted)
ad178c8e
IS
1814 return;
1815
1816 key.fib_nh = fib_nh;
1817 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
1818 if (WARN_ON_ONCE(!nh))
1819 return;
1820
bf95233e
AS
1821 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
1822 if (!rif)
ad178c8e
IS
1823 return;
1824
1825 switch (event) {
1826 case FIB_EVENT_NH_ADD:
bf95233e 1827 mlxsw_sp_nexthop_rif_init(nh, rif);
ad178c8e
IS
1828 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1829 break;
1830 case FIB_EVENT_NH_DEL:
1831 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
9665b745 1832 mlxsw_sp_nexthop_rif_fini(nh);
ad178c8e
IS
1833 break;
1834 }
1835
1836 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1837}
1838
9665b745 1839static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
bf95233e 1840 struct mlxsw_sp_rif *rif)
9665b745
IS
1841{
1842 struct mlxsw_sp_nexthop *nh, *tmp;
1843
bf95233e 1844 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
9665b745
IS
1845 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1846 mlxsw_sp_nexthop_rif_fini(nh);
1847 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1848 }
1849}
1850
a7ff87ac
JP
1851static struct mlxsw_sp_nexthop_group *
1852mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1853{
1854 struct mlxsw_sp_nexthop_group *nh_grp;
1855 struct mlxsw_sp_nexthop *nh;
1856 struct fib_nh *fib_nh;
1857 size_t alloc_size;
1858 int i;
1859 int err;
1860
1861 alloc_size = sizeof(*nh_grp) +
1862 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1863 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1864 if (!nh_grp)
1865 return ERR_PTR(-ENOMEM);
1866 INIT_LIST_HEAD(&nh_grp->fib_list);
b3e8d1eb 1867 nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
a7ff87ac 1868 nh_grp->count = fi->fib_nhs;
e9ad5e7d 1869 nh_grp->key.fi = fi;
a7ff87ac
JP
1870 for (i = 0; i < nh_grp->count; i++) {
1871 nh = &nh_grp->nexthops[i];
1872 fib_nh = &fi->fib_nh[i];
1873 err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1874 if (err)
1875 goto err_nexthop_init;
1876 }
e9ad5e7d
IS
1877 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
1878 if (err)
1879 goto err_nexthop_group_insert;
a7ff87ac
JP
1880 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1881 return nh_grp;
1882
e9ad5e7d 1883err_nexthop_group_insert:
a7ff87ac 1884err_nexthop_init:
df6dd79b
IS
1885 for (i--; i >= 0; i--) {
1886 nh = &nh_grp->nexthops[i];
a7ff87ac 1887 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
df6dd79b 1888 }
a7ff87ac
JP
1889 kfree(nh_grp);
1890 return ERR_PTR(err);
1891}
1892
1893static void
1894mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1895 struct mlxsw_sp_nexthop_group *nh_grp)
1896{
1897 struct mlxsw_sp_nexthop *nh;
1898 int i;
1899
e9ad5e7d 1900 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
a7ff87ac
JP
1901 for (i = 0; i < nh_grp->count; i++) {
1902 nh = &nh_grp->nexthops[i];
1903 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1904 }
58312125
IS
1905 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1906 WARN_ON_ONCE(nh_grp->adj_index_valid);
a7ff87ac
JP
1907 kfree(nh_grp);
1908}
1909
a7ff87ac
JP
1910static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1911 struct mlxsw_sp_fib_entry *fib_entry,
1912 struct fib_info *fi)
1913{
e9ad5e7d 1914 struct mlxsw_sp_nexthop_group_key key;
a7ff87ac
JP
1915 struct mlxsw_sp_nexthop_group *nh_grp;
1916
e9ad5e7d
IS
1917 key.fi = fi;
1918 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
a7ff87ac
JP
1919 if (!nh_grp) {
1920 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1921 if (IS_ERR(nh_grp))
1922 return PTR_ERR(nh_grp);
1923 }
1924 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1925 fib_entry->nh_group = nh_grp;
1926 return 0;
1927}
1928
1929static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1930 struct mlxsw_sp_fib_entry *fib_entry)
1931{
1932 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1933
1934 list_del(&fib_entry->nexthop_group_node);
1935 if (!list_empty(&nh_grp->fib_list))
1936 return;
1937 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1938}
1939
013b20f9
IS
1940static bool
1941mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1942{
1943 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1944
9aecce1c
IS
1945 if (fib_entry->params.tos)
1946 return false;
1947
013b20f9
IS
1948 switch (fib_entry->type) {
1949 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1950 return !!nh_group->adj_index_valid;
1951 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
70ad3506 1952 return !!nh_group->nh_rif;
013b20f9
IS
1953 default:
1954 return false;
1955 }
1956}
1957
1958static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
1959{
1960 fib_entry->offloaded = true;
1961
76610ebb 1962 switch (fib_entry->fib_node->fib->proto) {
013b20f9
IS
1963 case MLXSW_SP_L3_PROTO_IPV4:
1964 fib_info_offload_inc(fib_entry->nh_group->key.fi);
1965 break;
1966 case MLXSW_SP_L3_PROTO_IPV6:
1967 WARN_ON_ONCE(1);
1968 }
1969}
1970
1971static void
1972mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
1973{
76610ebb 1974 switch (fib_entry->fib_node->fib->proto) {
013b20f9
IS
1975 case MLXSW_SP_L3_PROTO_IPV4:
1976 fib_info_offload_dec(fib_entry->nh_group->key.fi);
1977 break;
1978 case MLXSW_SP_L3_PROTO_IPV6:
1979 WARN_ON_ONCE(1);
1980 }
1981
1982 fib_entry->offloaded = false;
1983}
1984
1985static void
1986mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
1987 enum mlxsw_reg_ralue_op op, int err)
1988{
1989 switch (op) {
1990 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
1991 if (!fib_entry->offloaded)
1992 return;
1993 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
1994 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
1995 if (err)
1996 return;
1997 if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1998 !fib_entry->offloaded)
1999 mlxsw_sp_fib_entry_offload_set(fib_entry);
2000 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
2001 fib_entry->offloaded)
2002 mlxsw_sp_fib_entry_offload_unset(fib_entry);
2003 return;
2004 default:
2005 return;
2006 }
2007}
2008
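/* Editor's note: the following is an illustrative user-space sketch, not part
 * of the driver. It models the offload-flag refresh above under simplified
 * assumptions: a delete clears the flag, and a successful write syncs it with
 * whether the entry can currently be offloaded. All names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum ralue_op_model { OP_MODEL_WRITE, OP_MODEL_DELETE };

struct fib_entry_model {
	bool should_offload;
	bool offloaded;
};

static void offload_refresh_model(struct fib_entry_model *e,
				  enum ralue_op_model op, int err)
{
	switch (op) {
	case OP_MODEL_DELETE:
		e->offloaded = false;		/* route removed from hardware */
		return;
	case OP_MODEL_WRITE:
		if (err)
			return;			/* write failed, keep old state */
		e->offloaded = e->should_offload;
		return;
	}
}

int main(void)
{
	struct fib_entry_model e = { .should_offload = true, .offloaded = false };

	offload_refresh_model(&e, OP_MODEL_WRITE, 0);
	printf("after write: offloaded=%d\n", e.offloaded);	/* 1 */
	offload_refresh_model(&e, OP_MODEL_DELETE, 0);
	printf("after delete: offloaded=%d\n", e.offloaded);	/* 0 */
	return 0;
}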
a7ff87ac
JP
2009static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
2010 struct mlxsw_sp_fib_entry *fib_entry,
2011 enum mlxsw_reg_ralue_op op)
2012{
2013 char ralue_pl[MLXSW_REG_RALUE_LEN];
76610ebb 2014 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
9aecce1c 2015 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
a7ff87ac
JP
2016 enum mlxsw_reg_ralue_trap_action trap_action;
2017 u16 trap_id = 0;
2018 u32 adjacency_index = 0;
2019 u16 ecmp_size = 0;
2020
2021 /* In case the nexthop group adjacency index is valid, use it
2022 * with the provided ECMP size. Otherwise, set up a trap and pass
2023 * traffic to the kernel.
2024 */
4b411477 2025 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
a7ff87ac
JP
2026 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
2027 adjacency_index = fib_entry->nh_group->adj_index;
2028 ecmp_size = fib_entry->nh_group->ecmp_size;
2029 } else {
2030 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2031 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2032 }
2033
1a9234e6 2034 mlxsw_reg_ralue_pack4(ralue_pl,
76610ebb
IS
2035 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2036 fib->vr->id, fib_entry->fib_node->key.prefix_len,
9aecce1c 2037 *p_dip);
a7ff87ac
JP
2038 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
2039 adjacency_index, ecmp_size);
2040 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2041}
2042
61c503f9
JP
2043static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
2044 struct mlxsw_sp_fib_entry *fib_entry,
2045 enum mlxsw_reg_ralue_op op)
2046{
bf95233e 2047 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
76610ebb 2048 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
70ad3506 2049 enum mlxsw_reg_ralue_trap_action trap_action;
61c503f9 2050 char ralue_pl[MLXSW_REG_RALUE_LEN];
9aecce1c 2051 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
70ad3506 2052 u16 trap_id = 0;
bf95233e 2053 u16 rif_index = 0;
70ad3506
IS
2054
2055 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
2056 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
bf95233e 2057 rif_index = rif->rif_index;
70ad3506
IS
2058 } else {
2059 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2060 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2061 }
61c503f9 2062
1a9234e6 2063 mlxsw_reg_ralue_pack4(ralue_pl,
76610ebb
IS
2064 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2065 fib->vr->id, fib_entry->fib_node->key.prefix_len,
9aecce1c 2066 *p_dip);
bf95233e
AS
2067 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
2068 rif_index);
61c503f9
JP
2069 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2070}
2071
2072static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
2073 struct mlxsw_sp_fib_entry *fib_entry,
2074 enum mlxsw_reg_ralue_op op)
2075{
76610ebb 2076 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
61c503f9 2077 char ralue_pl[MLXSW_REG_RALUE_LEN];
9aecce1c 2078 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
61c503f9 2079
1a9234e6 2080 mlxsw_reg_ralue_pack4(ralue_pl,
76610ebb
IS
2081 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2082 fib->vr->id, fib_entry->fib_node->key.prefix_len,
9aecce1c 2083 *p_dip);
61c503f9
JP
2084 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2085 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2086}
2087
2088static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
2089 struct mlxsw_sp_fib_entry *fib_entry,
2090 enum mlxsw_reg_ralue_op op)
2091{
2092 switch (fib_entry->type) {
2093 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
a7ff87ac 2094 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
61c503f9
JP
2095 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
2096 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
2097 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
2098 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
2099 }
2100 return -EINVAL;
2101}
2102
2103static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2104 struct mlxsw_sp_fib_entry *fib_entry,
2105 enum mlxsw_reg_ralue_op op)
2106{
013b20f9
IS
2107 int err = -EINVAL;
2108
76610ebb 2109 switch (fib_entry->fib_node->fib->proto) {
61c503f9 2110 case MLXSW_SP_L3_PROTO_IPV4:
013b20f9
IS
2111 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
2112 break;
61c503f9 2113 case MLXSW_SP_L3_PROTO_IPV6:
013b20f9 2114 return err;
61c503f9 2115 }
013b20f9
IS
2116 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
2117 return err;
61c503f9
JP
2118}
2119
2120static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
2121 struct mlxsw_sp_fib_entry *fib_entry)
2122{
7146da31
JP
2123 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2124 MLXSW_REG_RALUE_OP_WRITE_WRITE);
61c503f9
JP
2125}
2126
2127static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
2128 struct mlxsw_sp_fib_entry *fib_entry)
2129{
2130 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2131 MLXSW_REG_RALUE_OP_WRITE_DELETE);
2132}
2133
61c503f9 2134static int
013b20f9
IS
2135mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
2136 const struct fib_entry_notifier_info *fen_info,
2137 struct mlxsw_sp_fib_entry *fib_entry)
61c503f9 2138{
b45f64d1 2139 struct fib_info *fi = fen_info->fi;
61c503f9 2140
97989ee0
IS
2141 switch (fen_info->type) {
2142 case RTN_BROADCAST: /* fall through */
2143 case RTN_LOCAL:
61c503f9
JP
2144 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2145 return 0;
97989ee0
IS
2146 case RTN_UNREACHABLE: /* fall through */
2147 case RTN_BLACKHOLE: /* fall through */
2148 case RTN_PROHIBIT:
2149 /* Packets hitting these routes need to be trapped, but
2150 * can do so with a lower priority than packets directed
2151 * at the host, so use action type local instead of trap.
2152 */
61c503f9 2153 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
97989ee0
IS
2154 return 0;
2155 case RTN_UNICAST:
2156 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
2157 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2158 else
2159 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
2160 return 0;
2161 default:
2162 return -EINVAL;
2163 }
a7ff87ac
JP
2164}
2165
5b004412 2166static struct mlxsw_sp_fib_entry *
9aecce1c
IS
2167mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
2168 struct mlxsw_sp_fib_node *fib_node,
2169 const struct fib_entry_notifier_info *fen_info)
61c503f9 2170{
61c503f9 2171 struct mlxsw_sp_fib_entry *fib_entry;
61c503f9
JP
2172 int err;
2173
9aecce1c 2174 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
61c503f9
JP
2175 if (!fib_entry) {
2176 err = -ENOMEM;
9aecce1c 2177 goto err_fib_entry_alloc;
61c503f9 2178 }
61c503f9 2179
013b20f9 2180 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
61c503f9 2181 if (err)
013b20f9 2182 goto err_fib4_entry_type_set;
61c503f9 2183
9aecce1c 2184 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
b8399a1e
IS
2185 if (err)
2186 goto err_nexthop_group_get;
2187
9aecce1c
IS
2188 fib_entry->params.prio = fen_info->fi->fib_priority;
2189 fib_entry->params.tb_id = fen_info->tb_id;
2190 fib_entry->params.type = fen_info->type;
2191 fib_entry->params.tos = fen_info->tos;
2192
2193 fib_entry->fib_node = fib_node;
2194
5b004412
JP
2195 return fib_entry;
2196
b8399a1e 2197err_nexthop_group_get:
013b20f9 2198err_fib4_entry_type_set:
9aecce1c
IS
2199 kfree(fib_entry);
2200err_fib_entry_alloc:
5b004412
JP
2201 return ERR_PTR(err);
2202}
2203
9aecce1c
IS
2204static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2205 struct mlxsw_sp_fib_entry *fib_entry)
2206{
2207 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
2208 kfree(fib_entry);
2209}
2210
2211static struct mlxsw_sp_fib_node *
2212mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2213 const struct fib_entry_notifier_info *fen_info);
2214
5b004412 2215static struct mlxsw_sp_fib_entry *
9aecce1c
IS
2216mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2217 const struct fib_entry_notifier_info *fen_info)
5b004412 2218{
9aecce1c
IS
2219 struct mlxsw_sp_fib_entry *fib_entry;
2220 struct mlxsw_sp_fib_node *fib_node;
5b004412 2221
9aecce1c
IS
2222 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2223 if (IS_ERR(fib_node))
2224 return NULL;
2225
2226 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2227 if (fib_entry->params.tb_id == fen_info->tb_id &&
2228 fib_entry->params.tos == fen_info->tos &&
2229 fib_entry->params.type == fen_info->type &&
2230 fib_entry->nh_group->key.fi == fen_info->fi) {
2231 return fib_entry;
2232 }
2233 }
2234
2235 return NULL;
2236}
2237
2238static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
2239 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
2240 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
2241 .key_len = sizeof(struct mlxsw_sp_fib_key),
2242 .automatic_shrinking = true,
2243};
2244
2245static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
2246 struct mlxsw_sp_fib_node *fib_node)
2247{
2248 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
2249 mlxsw_sp_fib_ht_params);
2250}
2251
2252static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
2253 struct mlxsw_sp_fib_node *fib_node)
2254{
2255 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
2256 mlxsw_sp_fib_ht_params);
2257}
2258
2259static struct mlxsw_sp_fib_node *
2260mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2261 size_t addr_len, unsigned char prefix_len)
2262{
2263 struct mlxsw_sp_fib_key key;
2264
2265 memset(&key, 0, sizeof(key));
2266 memcpy(key.addr, addr, addr_len);
2267 key.prefix_len = prefix_len;
2268 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2269}
2270
2271static struct mlxsw_sp_fib_node *
76610ebb 2272mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
9aecce1c
IS
2273 size_t addr_len, unsigned char prefix_len)
2274{
2275 struct mlxsw_sp_fib_node *fib_node;
2276
2277 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2278 if (!fib_node)
5b004412
JP
2279 return NULL;
2280
9aecce1c 2281 INIT_LIST_HEAD(&fib_node->entry_list);
76610ebb 2282 list_add(&fib_node->list, &fib->node_list);
9aecce1c
IS
2283 memcpy(fib_node->key.addr, addr, addr_len);
2284 fib_node->key.prefix_len = prefix_len;
9aecce1c
IS
2285
2286 return fib_node;
2287}
2288
2289static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2290{
9aecce1c
IS
2291 list_del(&fib_node->list);
2292 WARN_ON(!list_empty(&fib_node->entry_list));
2293 kfree(fib_node);
2294}
2295
2296static bool
2297mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2298 const struct mlxsw_sp_fib_entry *fib_entry)
2299{
2300 return list_first_entry(&fib_node->entry_list,
2301 struct mlxsw_sp_fib_entry, list) == fib_entry;
2302}
2303
2304static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2305{
2306 unsigned char prefix_len = fib_node->key.prefix_len;
76610ebb 2307 struct mlxsw_sp_fib *fib = fib_node->fib;
9aecce1c
IS
2308
2309 if (fib->prefix_ref_count[prefix_len]++ == 0)
2310 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2311}
2312
2313static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2314{
2315 unsigned char prefix_len = fib_node->key.prefix_len;
76610ebb 2316 struct mlxsw_sp_fib *fib = fib_node->fib;
9aecce1c
IS
2317
2318 if (--fib->prefix_ref_count[prefix_len] == 0)
2319 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
5b004412
JP
2320}
2321
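/* Editor's note: an illustrative user-space sketch, not part of the driver.
 * It models the prefix-usage bookkeeping above under simplified assumptions:
 * a per-prefix-length reference count backed by a "used" flag, where a length
 * is marked in use by the first node and cleared by the last, so the LPM tree
 * only needs to describe prefix lengths that are actually present. All names
 * are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define PREFIX_LEN_MAX_MODEL	33	/* lengths 0..32 for IPv4 */

struct prefix_usage_model {
	unsigned int ref_count[PREFIX_LEN_MAX_MODEL];
	bool used[PREFIX_LEN_MAX_MODEL];
};

static void prefix_inc_model(struct prefix_usage_model *pu, unsigned char len)
{
	if (pu->ref_count[len]++ == 0)
		pu->used[len] = true;	/* first node with this length */
}

static void prefix_dec_model(struct prefix_usage_model *pu, unsigned char len)
{
	if (--pu->ref_count[len] == 0)
		pu->used[len] = false;	/* last node with this length gone */
}

int main(void)
{
	struct prefix_usage_model pu = { 0 };

	prefix_inc_model(&pu, 24);
	prefix_inc_model(&pu, 24);
	prefix_dec_model(&pu, 24);
	printf("/24 used: %d\n", pu.used[24]);	/* still 1 */
	prefix_dec_model(&pu, 24);
	printf("/24 used: %d\n", pu.used[24]);	/* now 0 */
	return 0;
}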
76610ebb
IS
2322static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2323 struct mlxsw_sp_fib_node *fib_node,
2324 struct mlxsw_sp_fib *fib)
2325{
2326 struct mlxsw_sp_prefix_usage req_prefix_usage;
2327 struct mlxsw_sp_lpm_tree *lpm_tree;
2328 int err;
2329
2330 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2331 if (err)
2332 return err;
2333 fib_node->fib = fib;
2334
2335 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2336 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2337
2338 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2339 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2340 &req_prefix_usage);
2341 if (err)
2342 goto err_tree_check;
2343 } else {
2344 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2345 fib->proto);
2346 if (IS_ERR(lpm_tree))
2347 return PTR_ERR(lpm_tree);
2348 fib->lpm_tree = lpm_tree;
2349 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2350 if (err)
2351 goto err_tree_bind;
2352 }
2353
2354 mlxsw_sp_fib_node_prefix_inc(fib_node);
2355
2356 return 0;
2357
2358err_tree_bind:
2359 fib->lpm_tree = NULL;
2360 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2361err_tree_check:
2362 fib_node->fib = NULL;
2363 mlxsw_sp_fib_node_remove(fib, fib_node);
2364 return err;
2365}
2366
2367static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
2368 struct mlxsw_sp_fib_node *fib_node)
2369{
2370 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
2371 struct mlxsw_sp_fib *fib = fib_node->fib;
2372
2373 mlxsw_sp_fib_node_prefix_dec(fib_node);
2374
2375 if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2376 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
2377 fib->lpm_tree = NULL;
2378 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2379 } else {
2380 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
2381 }
2382
2383 fib_node->fib = NULL;
2384 mlxsw_sp_fib_node_remove(fib, fib_node);
2385}
2386
9aecce1c
IS
2387static struct mlxsw_sp_fib_node *
2388mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2389 const struct fib_entry_notifier_info *fen_info)
5b004412 2390{
9aecce1c 2391 struct mlxsw_sp_fib_node *fib_node;
76610ebb 2392 struct mlxsw_sp_fib *fib;
9aecce1c
IS
2393 struct mlxsw_sp_vr *vr;
2394 int err;
2395
76610ebb 2396 vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
9aecce1c
IS
2397 if (IS_ERR(vr))
2398 return ERR_CAST(vr);
76610ebb 2399 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
9aecce1c 2400
76610ebb 2401 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
9aecce1c
IS
2402 sizeof(fen_info->dst),
2403 fen_info->dst_len);
2404 if (fib_node)
2405 return fib_node;
5b004412 2406
76610ebb 2407 fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
9aecce1c
IS
2408 sizeof(fen_info->dst),
2409 fen_info->dst_len);
2410 if (!fib_node) {
2411 err = -ENOMEM;
2412 goto err_fib_node_create;
5b004412 2413 }
9aecce1c 2414
76610ebb
IS
2415 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
2416 if (err)
2417 goto err_fib_node_init;
2418
9aecce1c
IS
2419 return fib_node;
2420
76610ebb
IS
2421err_fib_node_init:
2422 mlxsw_sp_fib_node_destroy(fib_node);
9aecce1c 2423err_fib_node_create:
76610ebb 2424 mlxsw_sp_vr_put(vr);
9aecce1c 2425 return ERR_PTR(err);
5b004412
JP
2426}
2427
9aecce1c
IS
2428static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
2429 struct mlxsw_sp_fib_node *fib_node)
5b004412 2430{
76610ebb 2431 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
5b004412 2432
9aecce1c
IS
2433 if (!list_empty(&fib_node->entry_list))
2434 return;
76610ebb 2435 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
9aecce1c 2436 mlxsw_sp_fib_node_destroy(fib_node);
76610ebb 2437 mlxsw_sp_vr_put(vr);
61c503f9
JP
2438}
2439
9aecce1c
IS
2440static struct mlxsw_sp_fib_entry *
2441mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
2442 const struct mlxsw_sp_fib_entry_params *params)
61c503f9 2443{
61c503f9 2444 struct mlxsw_sp_fib_entry *fib_entry;
9aecce1c
IS
2445
2446 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2447 if (fib_entry->params.tb_id > params->tb_id)
2448 continue;
2449 if (fib_entry->params.tb_id != params->tb_id)
2450 break;
2451 if (fib_entry->params.tos > params->tos)
2452 continue;
2453 if (fib_entry->params.prio >= params->prio ||
2454 fib_entry->params.tos < params->tos)
2455 return fib_entry;
2456 }
2457
2458 return NULL;
2459}
2460
4283bce5
IS
2461static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
2462 struct mlxsw_sp_fib_entry *new_entry)
2463{
2464 struct mlxsw_sp_fib_node *fib_node;
2465
2466 if (WARN_ON(!fib_entry))
2467 return -EINVAL;
2468
2469 fib_node = fib_entry->fib_node;
2470 list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
2471 if (fib_entry->params.tb_id != new_entry->params.tb_id ||
2472 fib_entry->params.tos != new_entry->params.tos ||
2473 fib_entry->params.prio != new_entry->params.prio)
2474 break;
2475 }
2476
2477 list_add_tail(&new_entry->list, &fib_entry->list);
2478 return 0;
2479}
2480
9aecce1c
IS
2481static int
2482mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
4283bce5 2483 struct mlxsw_sp_fib_entry *new_entry,
599cf8f9 2484 bool replace, bool append)
9aecce1c
IS
2485{
2486 struct mlxsw_sp_fib_entry *fib_entry;
2487
2488 fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);
2489
4283bce5
IS
2490 if (append)
2491 return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
599cf8f9
IS
2492 if (replace && WARN_ON(!fib_entry))
2493 return -EINVAL;
4283bce5 2494
599cf8f9
IS
2495 /* Insert the new entry before the replaced one, so that we can later
2496 * remove the second.
2497 */
9aecce1c
IS
2498 if (fib_entry) {
2499 list_add_tail(&new_entry->list, &fib_entry->list);
2500 } else {
2501 struct mlxsw_sp_fib_entry *last;
2502
2503 list_for_each_entry(last, &fib_node->entry_list, list) {
2504 if (new_entry->params.tb_id > last->params.tb_id)
2505 break;
2506 fib_entry = last;
2507 }
2508
2509 if (fib_entry)
2510 list_add(&new_entry->list, &fib_entry->list);
2511 else
2512 list_add(&new_entry->list, &fib_node->entry_list);
2513 }
2514
2515 return 0;
2516}
2517
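/* Editor's note: an illustrative user-space sketch, not part of the driver.
 * It shows the ordering the entry list above maintains, expressed as a
 * comparator: entries are sorted by table ID (descending), then TOS
 * (descending), then priority (ascending), so the first entry on the node is
 * the one that should be programmed into hardware. All names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct fib_entry_params_model {
	unsigned int tb_id;
	unsigned char tos;
	unsigned int prio;
};

static int fib_entry_params_cmp(const void *a, const void *b)
{
	const struct fib_entry_params_model *pa = a, *pb = b;

	if (pa->tb_id != pb->tb_id)
		return pa->tb_id > pb->tb_id ? -1 : 1;	/* higher table ID first */
	if (pa->tos != pb->tos)
		return pa->tos > pb->tos ? -1 : 1;	/* higher TOS first */
	if (pa->prio != pb->prio)
		return pa->prio < pb->prio ? -1 : 1;	/* lower priority first */
	return 0;
}

int main(void)
{
	struct fib_entry_params_model entries[] = {
		{ 254, 0, 20 }, { 255, 0, 10 }, { 254, 8, 10 }, { 254, 0, 10 },
	};
	size_t i, n = sizeof(entries) / sizeof(entries[0]);

	qsort(entries, n, sizeof(entries[0]), fib_entry_params_cmp);
	for (i = 0; i < n; i++)
		printf("tb_id=%u tos=%u prio=%u\n", entries[i].tb_id,
		       entries[i].tos, entries[i].prio);
	return 0;
}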
2518static void
2519mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
2520{
2521 list_del(&fib_entry->list);
2522}
2523
2524static int
2525mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
2526 const struct mlxsw_sp_fib_node *fib_node,
2527 struct mlxsw_sp_fib_entry *fib_entry)
2528{
2529 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2530 return 0;
2531
2532 /* To prevent packet loss, overwrite the previously offloaded
2533 * entry.
2534 */
2535 if (!list_is_singular(&fib_node->entry_list)) {
2536 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2537 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2538
2539 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
2540 }
2541
2542 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2543}
2544
2545static void
2546mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
2547 const struct mlxsw_sp_fib_node *fib_node,
2548 struct mlxsw_sp_fib_entry *fib_entry)
2549{
2550 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2551 return;
2552
2553 /* Promote the next entry by overwriting the deleted entry */
2554 if (!list_is_singular(&fib_node->entry_list)) {
2555 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2556 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2557
2558 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
2559 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2560 return;
2561 }
2562
2563 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2564}
2565
2566static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4283bce5 2567 struct mlxsw_sp_fib_entry *fib_entry,
599cf8f9 2568 bool replace, bool append)
9aecce1c
IS
2569{
2570 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2571 int err;
2572
599cf8f9
IS
2573 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2574 append);
9aecce1c
IS
2575 if (err)
2576 return err;
2577
2578 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2579 if (err)
2580 goto err_fib4_node_entry_add;
2581
9aecce1c
IS
2582 return 0;
2583
2584err_fib4_node_entry_add:
2585 mlxsw_sp_fib4_node_list_remove(fib_entry);
2586 return err;
2587}
2588
2589static void
2590mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
2591 struct mlxsw_sp_fib_entry *fib_entry)
2592{
2593 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2594
9aecce1c
IS
2595 mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
2596 mlxsw_sp_fib4_node_list_remove(fib_entry);
2597}
2598
599cf8f9
IS
2599static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2600 struct mlxsw_sp_fib_entry *fib_entry,
2601 bool replace)
2602{
2603 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2604 struct mlxsw_sp_fib_entry *replaced;
2605
2606 if (!replace)
2607 return;
2608
2609 /* We inserted the new entry before the replaced one */
2610 replaced = list_next_entry(fib_entry, list);
2611
2612 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2613 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
2614 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2615}
2616
9aecce1c
IS
2617static int
2618mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
4283bce5 2619 const struct fib_entry_notifier_info *fen_info,
599cf8f9 2620 bool replace, bool append)
9aecce1c
IS
2621{
2622 struct mlxsw_sp_fib_entry *fib_entry;
2623 struct mlxsw_sp_fib_node *fib_node;
61c503f9
JP
2624 int err;
2625
9011b677 2626 if (mlxsw_sp->router->aborted)
b45f64d1
JP
2627 return 0;
2628
9aecce1c
IS
2629 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2630 if (IS_ERR(fib_node)) {
2631 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
2632 return PTR_ERR(fib_node);
b45f64d1 2633 }
61c503f9 2634
9aecce1c
IS
2635 fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
2636 if (IS_ERR(fib_entry)) {
2637 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
2638 err = PTR_ERR(fib_entry);
2639 goto err_fib4_entry_create;
2640 }
5b004412 2641
599cf8f9
IS
2642 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
2643 append);
b45f64d1 2644 if (err) {
9aecce1c
IS
2645 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
2646 goto err_fib4_node_entry_link;
b45f64d1 2647 }
9aecce1c 2648
599cf8f9
IS
2649 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);
2650
61c503f9
JP
2651 return 0;
2652
9aecce1c
IS
2653err_fib4_node_entry_link:
2654 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2655err_fib4_entry_create:
2656 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
61c503f9
JP
2657 return err;
2658}
2659
37956d78
JP
2660static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2661 struct fib_entry_notifier_info *fen_info)
61c503f9 2662{
61c503f9 2663 struct mlxsw_sp_fib_entry *fib_entry;
9aecce1c 2664 struct mlxsw_sp_fib_node *fib_node;
61c503f9 2665
9011b677 2666 if (mlxsw_sp->router->aborted)
37956d78 2667 return;
b45f64d1 2668
9aecce1c
IS
2669 fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2670 if (WARN_ON(!fib_entry))
37956d78 2671 return;
9aecce1c 2672 fib_node = fib_entry->fib_node;
5b004412 2673
9aecce1c
IS
2674 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2675 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2676 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
61c503f9 2677}
b45f64d1
JP
2678
2679static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
2680{
2681 char ralta_pl[MLXSW_REG_RALTA_LEN];
2682 char ralst_pl[MLXSW_REG_RALST_LEN];
b5d90e6d 2683 int i, err;
b45f64d1
JP
2684
2685 mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
2686 MLXSW_SP_LPM_TREE_MIN);
2687 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
2688 if (err)
2689 return err;
2690
2691 mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
2692 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
2693 if (err)
2694 return err;
2695
b5d90e6d 2696 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 2697 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
b5d90e6d
IS
2698 char raltb_pl[MLXSW_REG_RALTB_LEN];
2699 char ralue_pl[MLXSW_REG_RALUE_LEN];
b45f64d1 2700
b5d90e6d
IS
2701 if (!mlxsw_sp_vr_is_used(vr))
2702 continue;
2703
2704 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
2705 MLXSW_REG_RALXX_PROTOCOL_IPV4,
2706 MLXSW_SP_LPM_TREE_MIN);
2707 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
2708 raltb_pl);
2709 if (err)
2710 return err;
2711
2712 mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
2713 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
2714 0);
2715 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2716 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
2717 ralue_pl);
2718 if (err)
2719 return err;
2720 }
2721
2722 return 0;
b45f64d1
JP
2723}
2724
9aecce1c
IS
2725static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
2726 struct mlxsw_sp_fib_node *fib_node)
2727{
2728 struct mlxsw_sp_fib_entry *fib_entry, *tmp;
2729
2730 list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
2731 bool do_break = &tmp->list == &fib_node->entry_list;
2732
2733 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2734 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2735 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2736 /* Break when entry list is empty and node was freed.
2737 * Otherwise, we'll access freed memory in the next
2738 * iteration.
2739 */
2740 if (do_break)
2741 break;
2742 }
2743}
2744
2745static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2746 struct mlxsw_sp_fib_node *fib_node)
2747{
76610ebb 2748 switch (fib_node->fib->proto) {
9aecce1c
IS
2749 case MLXSW_SP_L3_PROTO_IPV4:
2750 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2751 break;
2752 case MLXSW_SP_L3_PROTO_IPV6:
2753 WARN_ON_ONCE(1);
2754 break;
2755 }
2756}
2757
76610ebb
IS
2758static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2759 struct mlxsw_sp_vr *vr,
2760 enum mlxsw_sp_l3proto proto)
b45f64d1 2761{
76610ebb 2762 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
9aecce1c 2763 struct mlxsw_sp_fib_node *fib_node, *tmp;
76610ebb
IS
2764
2765 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2766 bool do_break = &tmp->list == &fib->node_list;
2767
2768 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2769 if (do_break)
2770 break;
2771 }
2772}
2773
2774static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
2775{
b45f64d1 2776 int i;
b45f64d1 2777
c1a38311 2778 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 2779 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
ac571de9 2780
76610ebb 2781 if (!mlxsw_sp_vr_is_used(vr))
b45f64d1 2782 continue;
76610ebb 2783 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
b45f64d1 2784 }
ac571de9
IS
2785}
2786
2787static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
2788{
2789 int err;
2790
9011b677 2791 if (mlxsw_sp->router->aborted)
d331d303
IS
2792 return;
2793 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
ac571de9 2794 mlxsw_sp_router_fib_flush(mlxsw_sp);
9011b677 2795 mlxsw_sp->router->aborted = true;
b45f64d1
JP
2796 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
2797 if (err)
2798 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
2799}
2800
3057224e 2801struct mlxsw_sp_fib_event_work {
a0e4761d 2802 struct work_struct work;
ad178c8e
IS
2803 union {
2804 struct fib_entry_notifier_info fen_info;
5d7bfd14 2805 struct fib_rule_notifier_info fr_info;
ad178c8e
IS
2806 struct fib_nh_notifier_info fnh_info;
2807 };
3057224e
IS
2808 struct mlxsw_sp *mlxsw_sp;
2809 unsigned long event;
2810};
2811
2812static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
b45f64d1 2813{
3057224e 2814 struct mlxsw_sp_fib_event_work *fib_work =
a0e4761d 2815 container_of(work, struct mlxsw_sp_fib_event_work, work);
3057224e 2816 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5d7bfd14 2817 struct fib_rule *rule;
599cf8f9 2818 bool replace, append;
b45f64d1
JP
2819 int err;
2820
3057224e
IS
2821 /* Protect internal structures from changes */
2822 rtnl_lock();
2823 switch (fib_work->event) {
599cf8f9 2824 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4283bce5 2825 case FIB_EVENT_ENTRY_APPEND: /* fall through */
b45f64d1 2826 case FIB_EVENT_ENTRY_ADD:
599cf8f9 2827 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
4283bce5
IS
2828 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
2829 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
599cf8f9 2830 replace, append);
b45f64d1
JP
2831 if (err)
2832 mlxsw_sp_router_fib4_abort(mlxsw_sp);
3057224e 2833 fib_info_put(fib_work->fen_info.fi);
b45f64d1
JP
2834 break;
2835 case FIB_EVENT_ENTRY_DEL:
3057224e
IS
2836 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
2837 fib_info_put(fib_work->fen_info.fi);
b45f64d1
JP
2838 break;
2839 case FIB_EVENT_RULE_ADD: /* fall through */
2840 case FIB_EVENT_RULE_DEL:
5d7bfd14 2841 rule = fib_work->fr_info.rule;
c7f6e665 2842 if (!fib4_rule_default(rule) && !rule->l3mdev)
5d7bfd14
IS
2843 mlxsw_sp_router_fib4_abort(mlxsw_sp);
2844 fib_rule_put(rule);
b45f64d1 2845 break;
ad178c8e
IS
2846 case FIB_EVENT_NH_ADD: /* fall through */
2847 case FIB_EVENT_NH_DEL:
2848 mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
2849 fib_work->fnh_info.fib_nh);
2850 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
2851 break;
b45f64d1 2852 }
3057224e
IS
2853 rtnl_unlock();
2854 kfree(fib_work);
2855}
2856
2857/* Called with rcu_read_lock() */
2858static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
2859 unsigned long event, void *ptr)
2860{
3057224e
IS
2861 struct mlxsw_sp_fib_event_work *fib_work;
2862 struct fib_notifier_info *info = ptr;
7e39d115 2863 struct mlxsw_sp_router *router;
3057224e
IS
2864
2865 if (!net_eq(info->net, &init_net))
2866 return NOTIFY_DONE;
2867
2868 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
2869 if (WARN_ON(!fib_work))
2870 return NOTIFY_BAD;
2871
a0e4761d 2872 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
7e39d115
IS
2873 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
2874 fib_work->mlxsw_sp = router->mlxsw_sp;
3057224e
IS
2875 fib_work->event = event;
2876
2877 switch (event) {
599cf8f9 2878 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4283bce5 2879 case FIB_EVENT_ENTRY_APPEND: /* fall through */
3057224e
IS
2880 case FIB_EVENT_ENTRY_ADD: /* fall through */
2881 case FIB_EVENT_ENTRY_DEL:
2882 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
2883 /* Take a reference on fib_info to prevent it from being
2884 * freed while work is queued. Release it afterwards.
2885 */
2886 fib_info_hold(fib_work->fen_info.fi);
2887 break;
5d7bfd14
IS
2888 case FIB_EVENT_RULE_ADD: /* fall through */
2889 case FIB_EVENT_RULE_DEL:
2890 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
2891 fib_rule_get(fib_work->fr_info.rule);
2892 break;
ad178c8e
IS
2893 case FIB_EVENT_NH_ADD: /* fall through */
2894 case FIB_EVENT_NH_DEL:
2895 memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
2896 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
2897 break;
3057224e
IS
2898 }
2899
a0e4761d 2900 mlxsw_core_schedule_work(&fib_work->work);
3057224e 2901
b45f64d1
JP
2902 return NOTIFY_DONE;
2903}
2904
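/* Editor's note: an illustrative user-space sketch, not part of the driver.
 * It models the deferral pattern above under simplified assumptions: the
 * notifier runs in atomic context, so it only copies the event info and takes
 * a reference, while the queued work item does the heavy lifting later and
 * drops the reference when done. All names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct fib_info_model {
	int refcnt;
};

struct fib_event_work_model {
	struct fib_info_model *fi;
	unsigned long event;
};

static void fib_info_hold_model(struct fib_info_model *fi) { fi->refcnt++; }
static void fib_info_put_model(struct fib_info_model *fi) { fi->refcnt--; }

/* "Notifier" side: must not sleep, so only copy, hold and queue. */
static struct fib_event_work_model *
fib_event_notify_model(struct fib_info_model *fi, unsigned long event)
{
	struct fib_event_work_model *work = calloc(1, sizeof(*work));

	if (!work)
		return NULL;
	work->fi = fi;
	work->event = event;
	fib_info_hold_model(fi);	/* keep fi alive until the work runs */
	return work;
}

/* "Work item" side: runs later, may sleep, releases the reference. */
static void fib_event_work_run_model(struct fib_event_work_model *work)
{
	printf("processing event %lu, refcnt=%d\n", work->event,
	       work->fi->refcnt);
	fib_info_put_model(work->fi);
	free(work);
}

int main(void)
{
	struct fib_info_model fi = { .refcnt = 1 };
	struct fib_event_work_model *work = fib_event_notify_model(&fi, 0UL);

	if (work)
		fib_event_work_run_model(work);
	printf("final refcnt=%d\n", fi.refcnt);	/* back to 1 */
	return 0;
}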
4724ba56
IS
2905static struct mlxsw_sp_rif *
2906mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2907 const struct net_device *dev)
2908{
2909 int i;
2910
2911 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
5f9efffb
IS
2912 if (mlxsw_sp->router->rifs[i] &&
2913 mlxsw_sp->router->rifs[i]->dev == dev)
2914 return mlxsw_sp->router->rifs[i];
4724ba56
IS
2915
2916 return NULL;
2917}
2918
2919static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
2920{
2921 char ritr_pl[MLXSW_REG_RITR_LEN];
2922 int err;
2923
2924 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
2925 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2926 if (WARN_ON_ONCE(err))
2927 return err;
2928
2929 mlxsw_reg_ritr_enable_set(ritr_pl, false);
2930 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2931}
2932
2933static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
bf95233e 2934 struct mlxsw_sp_rif *rif)
4724ba56 2935{
bf95233e
AS
2936 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
2937 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
2938 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
4724ba56
IS
2939}
2940
bf95233e 2941static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
4724ba56
IS
2942 const struct in_device *in_dev,
2943 unsigned long event)
2944{
2945 switch (event) {
2946 case NETDEV_UP:
bf95233e 2947 if (!rif)
4724ba56
IS
2948 return true;
2949 return false;
2950 case NETDEV_DOWN:
bf95233e
AS
2951 if (rif && !in_dev->ifa_list &&
2952 !netif_is_l3_slave(rif->dev))
4724ba56
IS
2953 return true;
2954 /* It is possible we already removed the RIF ourselves
2955 * if it was assigned to a netdev that is now a bridge
2956 * or LAG slave.
2957 */
2958 return false;
2959 }
2960
2961 return false;
2962}
2963
e4f3c1c1
IS
2964static enum mlxsw_sp_rif_type
2965mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
2966 const struct net_device *dev)
2967{
2968 enum mlxsw_sp_fid_type type;
2969
2970 /* RIF type is derived from the type of the underlying FID */
2971 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
2972 type = MLXSW_SP_FID_TYPE_8021Q;
2973 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
2974 type = MLXSW_SP_FID_TYPE_8021Q;
2975 else if (netif_is_bridge_master(dev))
2976 type = MLXSW_SP_FID_TYPE_8021D;
2977 else
2978 type = MLXSW_SP_FID_TYPE_RFID;
2979
2980 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
2981}
2982
de5ed99e 2983static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
4724ba56
IS
2984{
2985 int i;
2986
de5ed99e
IS
2987 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
2988 if (!mlxsw_sp->router->rifs[i]) {
2989 *p_rif_index = i;
2990 return 0;
2991 }
2992 }
4724ba56 2993
de5ed99e 2994 return -ENOBUFS;
4724ba56
IS
2995}
2996
e4f3c1c1
IS
2997static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
2998 u16 vr_id,
2999 struct net_device *l3_dev)
4724ba56 3000{
bf95233e 3001 struct mlxsw_sp_rif *rif;
4724ba56 3002
e4f3c1c1 3003 rif = kzalloc(rif_size, GFP_KERNEL);
bf95233e 3004 if (!rif)
4724ba56
IS
3005 return NULL;
3006
bf95233e
AS
3007 INIT_LIST_HEAD(&rif->nexthop_list);
3008 INIT_LIST_HEAD(&rif->neigh_list);
3009 ether_addr_copy(rif->addr, l3_dev->dev_addr);
3010 rif->mtu = l3_dev->mtu;
3011 rif->vr_id = vr_id;
3012 rif->dev = l3_dev;
3013 rif->rif_index = rif_index;
4724ba56 3014
bf95233e 3015 return rif;
4724ba56
IS
3016}
3017
5f9efffb
IS
3018struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
3019 u16 rif_index)
3020{
3021 return mlxsw_sp->router->rifs[rif_index];
3022}
3023
fd1b9d41
AS
3024u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
3025{
3026 return rif->rif_index;
3027}
3028
3029int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
3030{
3031 return rif->dev->ifindex;
3032}
3033
4724ba56 3034static struct mlxsw_sp_rif *
e4f3c1c1
IS
3035mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
3036 const struct mlxsw_sp_rif_params *params)
4724ba56 3037{
e4f3c1c1
IS
3038 u32 tb_id = l3mdev_fib_table(params->dev);
3039 const struct mlxsw_sp_rif_ops *ops;
3040 enum mlxsw_sp_rif_type type;
bf95233e 3041 struct mlxsw_sp_rif *rif;
a1107487
IS
3042 struct mlxsw_sp_fid *fid;
3043 struct mlxsw_sp_vr *vr;
3044 u16 rif_index;
4724ba56
IS
3045 int err;
3046
e4f3c1c1
IS
3047 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
3048 ops = mlxsw_sp->router->rif_ops_arr[type];
3049
c9ec53f0
IS
3050 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
3051 if (IS_ERR(vr))
3052 return ERR_CAST(vr);
3053
de5ed99e
IS
3054 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
3055 if (err)
3056 goto err_rif_index_alloc;
4724ba56 3057
e4f3c1c1 3058 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
a13a594d
IS
3059 if (!rif) {
3060 err = -ENOMEM;
3061 goto err_rif_alloc;
3062 }
e4f3c1c1
IS
3063 rif->mlxsw_sp = mlxsw_sp;
3064 rif->ops = ops;
a13a594d 3065
e4f3c1c1
IS
3066 fid = ops->fid_get(rif);
3067 if (IS_ERR(fid)) {
3068 err = PTR_ERR(fid);
3069 goto err_fid_get;
4d93ceeb 3070 }
e4f3c1c1 3071 rif->fid = fid;
4d93ceeb 3072
e4f3c1c1
IS
3073 if (ops->setup)
3074 ops->setup(rif, params);
3075
3076 err = ops->configure(rif);
4724ba56 3077 if (err)
e4f3c1c1 3078 goto err_configure;
4724ba56 3079
e4f3c1c1 3080 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
a1107487 3081 mlxsw_sp_fid_index(fid), true);
4724ba56
IS
3082 if (err)
3083 goto err_rif_fdb_op;
3084
e4f3c1c1 3085 mlxsw_sp_rif_counters_alloc(rif);
a1107487 3086 mlxsw_sp_fid_rif_set(fid, rif);
5f9efffb 3087 mlxsw_sp->router->rifs[rif_index] = rif;
6913229e 3088 vr->rif_count++;
4724ba56 3089
bf95233e 3090 return rif;
4724ba56 3091
4724ba56 3092err_rif_fdb_op:
e4f3c1c1
IS
3093 ops->deconfigure(rif);
3094err_configure:
a1107487
IS
3095 mlxsw_sp_fid_put(fid);
3096err_fid_get:
e4f3c1c1
IS
3097 kfree(rif);
3098err_rif_alloc:
de5ed99e 3099err_rif_index_alloc:
c9ec53f0 3100 mlxsw_sp_vr_put(vr);
4724ba56
IS
3101 return ERR_PTR(err);
3102}
3103
e4f3c1c1 3104void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
4724ba56 3105{
e4f3c1c1
IS
3106 const struct mlxsw_sp_rif_ops *ops = rif->ops;
3107 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
a1107487 3108 struct mlxsw_sp_fid *fid = rif->fid;
e4f3c1c1 3109 struct mlxsw_sp_vr *vr;
4724ba56 3110
bf95233e 3111 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
e4f3c1c1 3112 vr = &mlxsw_sp->router->vrs[rif->vr_id];
e0c0afd8 3113
6913229e 3114 vr->rif_count--;
e4f3c1c1 3115 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
a1107487 3116 mlxsw_sp_fid_rif_set(fid, NULL);
e4f3c1c1
IS
3117 mlxsw_sp_rif_counters_free(rif);
3118 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
3119 mlxsw_sp_fid_index(fid), false);
3120 ops->deconfigure(rif);
a1107487 3121 mlxsw_sp_fid_put(fid);
e4f3c1c1 3122 kfree(rif);
c9ec53f0 3123 mlxsw_sp_vr_put(vr);
4724ba56
IS
3124}
3125
e4f3c1c1
IS
3126static void
3127mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
3128 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
3129{
3130 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
3131
3132 params->vid = mlxsw_sp_port_vlan->vid;
3133 params->lag = mlxsw_sp_port->lagged;
3134 if (params->lag)
3135 params->lag_id = mlxsw_sp_port->lag_id;
3136 else
3137 params->system_port = mlxsw_sp_port->local_port;
3138}
3139
7cbecf24 3140static int
a1107487 3141mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
7cbecf24 3142 struct net_device *l3_dev)
4724ba56 3143{
7cbecf24 3144 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1b8f09a0 3145 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
7cbecf24 3146 u16 vid = mlxsw_sp_port_vlan->vid;
bf95233e 3147 struct mlxsw_sp_rif *rif;
a1107487 3148 struct mlxsw_sp_fid *fid;
03ea01e9 3149 int err;
4724ba56 3150
1b8f09a0 3151 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
bf95233e 3152 if (!rif) {
e4f3c1c1
IS
3153 struct mlxsw_sp_rif_params params = {
3154 .dev = l3_dev,
3155 };
3156
3157 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
3158 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
bf95233e
AS
3159 if (IS_ERR(rif))
3160 return PTR_ERR(rif);
4724ba56
IS
3161 }
3162
a1107487 3163 /* FID was already created, just take a reference */
e4f3c1c1 3164 fid = rif->ops->fid_get(rif);
a1107487
IS
3165 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
3166 if (err)
3167 goto err_fid_port_vid_map;
3168
7cbecf24 3169 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
03ea01e9
IS
3170 if (err)
3171 goto err_port_vid_learning_set;
3172
7cbecf24 3173 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
03ea01e9
IS
3174 BR_STATE_FORWARDING);
3175 if (err)
3176 goto err_port_vid_stp_set;
3177
a1107487 3178 mlxsw_sp_port_vlan->fid = fid;
4724ba56 3179
4724ba56 3180 return 0;
03ea01e9
IS
3181
3182err_port_vid_stp_set:
7cbecf24 3183 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
03ea01e9 3184err_port_vid_learning_set:
a1107487
IS
3185 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
3186err_fid_port_vid_map:
3187 mlxsw_sp_fid_put(fid);
03ea01e9 3188 return err;
4724ba56
IS
3189}
3190
a1107487
IS
3191void
3192mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
4724ba56 3193{
ce95e154 3194 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7cbecf24 3195 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
ce95e154 3196 u16 vid = mlxsw_sp_port_vlan->vid;
ce95e154 3197
a1107487
IS
3198 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
3199 return;
4aafc368 3200
a1107487 3201 mlxsw_sp_port_vlan->fid = NULL;
7cbecf24
IS
3202 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
3203 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
a1107487
IS
3204 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
3205 /* If router port holds the last reference on the rFID, then the
3206 * associated Sub-port RIF will be destroyed.
3207 */
3208 mlxsw_sp_fid_put(fid);
4724ba56
IS
3209}
3210
7cbecf24
IS
3211static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
3212 struct net_device *port_dev,
3213 unsigned long event, u16 vid)
4724ba56
IS
3214{
3215 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
ce95e154 3216 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4724ba56 3217
ce95e154 3218 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
7cbecf24
IS
3219 if (WARN_ON(!mlxsw_sp_port_vlan))
3220 return -EINVAL;
4724ba56
IS
3221
3222 switch (event) {
3223 case NETDEV_UP:
a1107487 3224 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
7cbecf24 3225 l3_dev);
4724ba56 3226 case NETDEV_DOWN:
a1107487 3227 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4724ba56
IS
3228 break;
3229 }
3230
3231 return 0;
3232}
3233
3234static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3235 unsigned long event)
3236{
2b94e58d
JP
3237 if (netif_is_bridge_port(port_dev) ||
3238 netif_is_lag_port(port_dev) ||
3239 netif_is_ovs_port(port_dev))
4724ba56
IS
3240 return 0;
3241
7cbecf24 3242 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
4724ba56
IS
3243}
3244
3245static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3246 struct net_device *lag_dev,
3247 unsigned long event, u16 vid)
3248{
3249 struct net_device *port_dev;
3250 struct list_head *iter;
3251 int err;
3252
3253 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3254 if (mlxsw_sp_port_dev_check(port_dev)) {
7cbecf24
IS
3255 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
3256 port_dev,
3257 event, vid);
4724ba56
IS
3258 if (err)
3259 return err;
3260 }
3261 }
3262
3263 return 0;
3264}
3265
3266static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
3267 unsigned long event)
3268{
3269 if (netif_is_bridge_port(lag_dev))
3270 return 0;
3271
3272 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
3273}
3274
4724ba56 3275static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
4724ba56
IS
3276 unsigned long event)
3277{
3278 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
e4f3c1c1
IS
3279 struct mlxsw_sp_rif_params params = {
3280 .dev = l3_dev,
3281 };
a1107487 3282 struct mlxsw_sp_rif *rif;
4724ba56
IS
3283
3284 switch (event) {
3285 case NETDEV_UP:
e4f3c1c1
IS
3286 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
3287 if (IS_ERR(rif))
3288 return PTR_ERR(rif);
3289 break;
4724ba56 3290 case NETDEV_DOWN:
a1107487 3291 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
e4f3c1c1 3292 mlxsw_sp_rif_destroy(rif);
4724ba56
IS
3293 break;
3294 }
3295
3296 return 0;
3297}
3298
3299static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3300 unsigned long event)
3301{
3302 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4724ba56
IS
3303 u16 vid = vlan_dev_vlan_id(vlan_dev);
3304
6b27c8ad
IS
3305 if (netif_is_bridge_port(vlan_dev))
3306 return 0;
3307
4724ba56 3308 if (mlxsw_sp_port_dev_check(real_dev))
7cbecf24
IS
3309 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
3310 event, vid);
4724ba56
IS
3311 else if (netif_is_lag_master(real_dev))
3312 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3313 vid);
c57529e1 3314 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
a1107487 3315 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
4724ba56
IS
3316
3317 return 0;
3318}
3319
b1e45526
IS
3320static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
3321 unsigned long event)
3322{
3323 if (mlxsw_sp_port_dev_check(dev))
3324 return mlxsw_sp_inetaddr_port_event(dev, event);
3325 else if (netif_is_lag_master(dev))
3326 return mlxsw_sp_inetaddr_lag_event(dev, event);
3327 else if (netif_is_bridge_master(dev))
a1107487 3328 return mlxsw_sp_inetaddr_bridge_event(dev, event);
b1e45526
IS
3329 else if (is_vlan_dev(dev))
3330 return mlxsw_sp_inetaddr_vlan_event(dev, event);
3331 else
3332 return 0;
3333}
3334
4724ba56
IS
3335int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3336 unsigned long event, void *ptr)
3337{
3338 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3339 struct net_device *dev = ifa->ifa_dev->dev;
3340 struct mlxsw_sp *mlxsw_sp;
bf95233e 3341 struct mlxsw_sp_rif *rif;
4724ba56
IS
3342 int err = 0;
3343
3344 mlxsw_sp = mlxsw_sp_lower_get(dev);
3345 if (!mlxsw_sp)
3346 goto out;
3347
bf95233e
AS
3348 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3349 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
4724ba56
IS
3350 goto out;
3351
b1e45526 3352 err = __mlxsw_sp_inetaddr_event(dev, event);
4724ba56
IS
3353out:
3354 return notifier_from_errno(err);
3355}
3356
bf95233e 3357static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
4724ba56
IS
3358 const char *mac, int mtu)
3359{
3360 char ritr_pl[MLXSW_REG_RITR_LEN];
3361 int err;
3362
bf95233e 3363 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
4724ba56
IS
3364 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3365 if (err)
3366 return err;
3367
3368 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3369 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3370 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3371 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3372}
3373
3374int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3375{
3376 struct mlxsw_sp *mlxsw_sp;
bf95233e 3377 struct mlxsw_sp_rif *rif;
a1107487 3378 u16 fid_index;
4724ba56
IS
3379 int err;
3380
3381 mlxsw_sp = mlxsw_sp_lower_get(dev);
3382 if (!mlxsw_sp)
3383 return 0;
3384
bf95233e
AS
3385 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3386 if (!rif)
4724ba56 3387 return 0;
a1107487 3388 fid_index = mlxsw_sp_fid_index(rif->fid);
4724ba56 3389
a1107487 3390 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
4724ba56
IS
3391 if (err)
3392 return err;
3393
bf95233e
AS
3394 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
3395 dev->mtu);
4724ba56
IS
3396 if (err)
3397 goto err_rif_edit;
3398
a1107487 3399 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
4724ba56
IS
3400 if (err)
3401 goto err_rif_fdb_op;
3402
bf95233e
AS
3403 ether_addr_copy(rif->addr, dev->dev_addr);
3404 rif->mtu = dev->mtu;
4724ba56 3405
bf95233e 3406 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
4724ba56
IS
3407
3408 return 0;
3409
3410err_rif_fdb_op:
bf95233e 3411 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
4724ba56 3412err_rif_edit:
a1107487 3413 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
4724ba56
IS
3414 return err;
3415}
3416
b1e45526
IS
3417static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
3418 struct net_device *l3_dev)
7179eb5a 3419{
b1e45526 3420 struct mlxsw_sp_rif *rif;
7179eb5a 3421
b1e45526
IS
3422 /* If netdev is already associated with a RIF, then we need to
3423 * destroy it and create a new one with the new virtual router ID.
7179eb5a 3424 */
b1e45526
IS
3425 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3426 if (rif)
3427 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
7179eb5a 3428
b1e45526 3429 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
7179eb5a
IS
3430}
3431
b1e45526
IS
3432static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3433 struct net_device *l3_dev)
7179eb5a 3434{
b1e45526 3435 struct mlxsw_sp_rif *rif;
7179eb5a 3436
b1e45526
IS
3437 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3438 if (!rif)
7179eb5a 3439 return;
b1e45526 3440 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
7179eb5a
IS
3441}
3442
b1e45526
IS
3443int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
3444 struct netdev_notifier_changeupper_info *info)
3d70e458 3445{
b1e45526
IS
3446 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3447 int err = 0;
3d70e458 3448
b1e45526
IS
3449 if (!mlxsw_sp)
3450 return 0;
3d70e458 3451
b1e45526
IS
3452 switch (event) {
3453 case NETDEV_PRECHANGEUPPER:
3454 return 0;
3455 case NETDEV_CHANGEUPPER:
3456 if (info->linking)
3457 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
3458 else
3459 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
3460 break;
3461 }
3d70e458 3462
b1e45526 3463 return err;
3d70e458
IS
3464}
3465
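/* Sub-port RIFs are router interfaces bound to a {port, VID} or
 * {LAG, VID} pair. struct mlxsw_sp_rif_subport carries that extra
 * state around the common struct mlxsw_sp_rif.
 */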
e4f3c1c1
IS
3466static struct mlxsw_sp_rif_subport *
3467mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
a1107487 3468{
e4f3c1c1
IS
3469 return container_of(rif, struct mlxsw_sp_rif_subport, common);
3470}
3471
3472static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
3473 const struct mlxsw_sp_rif_params *params)
3474{
3475 struct mlxsw_sp_rif_subport *rif_subport;
3476
3477 rif_subport = mlxsw_sp_rif_subport_rif(rif);
3478 rif_subport->vid = params->vid;
3479 rif_subport->lag = params->lag;
3480 if (params->lag)
3481 rif_subport->lag_id = params->lag_id;
a1107487 3482 else
e4f3c1c1
IS
3483 rif_subport->system_port = params->system_port;
3484}
3485
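/* Program a sub-port RIF in hardware: RITR is packed with the common
 * RIF parameters (index, virtual router, MTU, MAC) followed by the
 * sub-port specifics, namely the LAG ID or system port and the VID.
 */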
3486static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
3487{
3488 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3489 struct mlxsw_sp_rif_subport *rif_subport;
3490 char ritr_pl[MLXSW_REG_RITR_LEN];
3491
3492 rif_subport = mlxsw_sp_rif_subport_rif(rif);
3493 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
3494 rif->rif_index, rif->vr_id, rif->dev->mtu,
3495 rif->dev->dev_addr);
3496 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
3497 rif_subport->lag ? rif_subport->lag_id :
3498 rif_subport->system_port,
3499 rif_subport->vid);
3500
3501 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3502}
3503
3504static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
3505{
3506 return mlxsw_sp_rif_subport_op(rif, true);
a1107487
IS
3507}
3508
e4f3c1c1
IS
3509static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
3510{
3511 mlxsw_sp_rif_subport_op(rif, false);
3512}
3513
3514static struct mlxsw_sp_fid *
3515mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
3516{
3517 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
3518}
3519
3520static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
3521 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
3522 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
3523 .setup = mlxsw_sp_rif_subport_setup,
3524 .configure = mlxsw_sp_rif_subport_configure,
3525 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
3526 .fid_get = mlxsw_sp_rif_subport_fid_get,
3527};
3528
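/* Shared RITR helper for VLAN and FID RIFs: as the callers below show,
 * @vid_fid carries a VID for MLXSW_REG_RITR_VLAN_IF and a FID index
 * for MLXSW_REG_RITR_FID_IF.
 */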
3529static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
3530 enum mlxsw_reg_ritr_if_type type,
3531 u16 vid_fid, bool enable)
3532{
3533 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3534 char ritr_pl[MLXSW_REG_RITR_LEN];
3535
3536 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
3537 rif->dev->mtu, rif->dev->dev_addr);
3538 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
3539
3540 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3541}
3542
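/* The "router port" is the local port one past the last front-panel
 * port. It is added as a member of a FID's broadcast flood table so
 * that flooded traffic also reaches the router.
 */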
3543static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
3544{
3545 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
3546}
3547
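/* VLAN RIF bring-up: program RITR as a VLAN interface for the FID's
 * VID and add the router port to the FID's broadcast flood table;
 * deconfigure below reverses both steps.
 */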
3548static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
3549{
3550 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3551 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
3552 int err;
3553
3554 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
3555 if (err)
3556 return err;
3557
3558 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3559 mlxsw_sp_router_port(mlxsw_sp), true);
3560 if (err)
3561 goto err_fid_bc_flood_set;
3562
3563 return 0;
3564
3565err_fid_bc_flood_set:
3566 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
3567 return err;
3568}
3569
3570static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
3571{
3572 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3573 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
3574
3575 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3576 mlxsw_sp_router_port(mlxsw_sp), false);
3577 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
3578}
3579
3580static struct mlxsw_sp_fid *
3581mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
3582{
3583 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
3584
3585 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
3586}
3587
3588static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
3589 .type = MLXSW_SP_RIF_TYPE_VLAN,
3590 .rif_size = sizeof(struct mlxsw_sp_rif),
3591 .configure = mlxsw_sp_rif_vlan_configure,
3592 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
3593 .fid_get = mlxsw_sp_rif_vlan_fid_get,
3594};
3595
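/* FID RIF bring-up mirrors the VLAN case, except that RITR is
 * programmed as a FID interface keyed by the FID index instead of a
 * VID.
 */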
3596static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
3597{
3598 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3599 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
3600 int err;
3601
3602 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
3603 true);
3604 if (err)
3605 return err;
3606
3607 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3608 mlxsw_sp_router_port(mlxsw_sp), true);
3609 if (err)
3610 goto err_fid_bc_flood_set;
3611
3612 return 0;
3613
3614err_fid_bc_flood_set:
3615 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
3616 return err;
3617}
3618
3619static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
3620{
3621 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3622 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
3623
3624 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3625 mlxsw_sp_router_port(mlxsw_sp), false);
3626 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
3627}
3628
3629static struct mlxsw_sp_fid *
3630mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
3631{
3632 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
3633}
3634
3635static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
3636 .type = MLXSW_SP_RIF_TYPE_FID,
3637 .rif_size = sizeof(struct mlxsw_sp_rif),
3638 .configure = mlxsw_sp_rif_fid_configure,
3639 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
3640 .fid_get = mlxsw_sp_rif_fid_fid_get,
3641};
3642
3643static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
3644 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
3645 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
3646 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
3647};
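/* Illustrative sketch, not part of the driver: a RIF creation path is
 * expected to dispatch through the ops table above roughly as follows,
 * with mlxsw_sp_rif_alloc() standing in for whatever allocator is used
 * elsewhere in this file:
 *
 *	ops = mlxsw_sp->router->rif_ops_arr[type];
 *	rif = mlxsw_sp_rif_alloc(ops->rif_size, ...);
 *	rif->fid = ops->fid_get(rif);
 *	if (ops->setup)
 *		ops->setup(rif, params);
 *	err = ops->configure(rif);
 *
 * Tear-down would call ops->deconfigure() and release the FID.
 */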
3648
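/* The per-RIF pointer array is sized from the MAX_RIFS device
 * resource; mlxsw_sp_rifs_fini() warns if any RIF is still allocated
 * when the router is torn down.
 */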
348b8fc3
IS
3649static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
3650{
3651 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
3652
3653 mlxsw_sp->router->rifs = kcalloc(max_rifs,
3654 sizeof(struct mlxsw_sp_rif *),
3655 GFP_KERNEL);
3656 if (!mlxsw_sp->router->rifs)
3657 return -ENOMEM;
e4f3c1c1
IS
3658
3659 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
3660
348b8fc3
IS
3661 return 0;
3662}
3663
3664static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
3665{
3666 int i;
3667
3668 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3669 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
3670
3671 kfree(mlxsw_sp->router->rifs);
3672}
3673
c3852ef7
IS
3674static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3675{
7e39d115 3676 struct mlxsw_sp_router *router;
c3852ef7
IS
3677
3678 /* Flush pending FIB notifications and then flush the device's
3679 * table before requesting another dump. The FIB notification
3680 * block is unregistered, so no need to take RTNL.
3681 */
3682 mlxsw_core_flush_owq();
7e39d115
IS
3683 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
3684 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
c3852ef7
IS
3685}
3686
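/* Low-level router enable: RGCR switches routing on in hardware and
 * caps the number of router interfaces at the MAX_RIFS resource. The
 * _fini() counterpart below simply switches routing off again.
 */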
4724ba56
IS
3687static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3688{
3689 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3690 u64 max_rifs;
3691 int err;
3692
3693 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
3694 return -EIO;
4724ba56 3695 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
4724ba56
IS
3696
3697 mlxsw_reg_rgcr_pack(rgcr_pl, true);
3698 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
3699 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3700 if (err)
348b8fc3 3701 return err;
4724ba56 3702 return 0;
4724ba56
IS
3703}
3704
3705static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3706{
3707 char rgcr_pl[MLXSW_REG_RGCR_LEN];
4724ba56
IS
3708
3709 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3710 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
4724ba56
IS
3711}
3712
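/* Top-level router initialization. Ordering matters: the hardware
 * router is enabled first, then the RIF array, the nexthop and
 * nexthop-group hash tables, the LPM trees, the virtual routers and
 * the neighbour code are set up, and only then is the FIB notifier
 * registered, so that the initial route dump finds a fully initialized
 * router. The error labels unwind the same steps in reverse order.
 */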
b45f64d1
JP
3713int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3714{
9011b677 3715 struct mlxsw_sp_router *router;
b45f64d1
JP
3716 int err;
3717
9011b677
IS
3718 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
3719 if (!router)
3720 return -ENOMEM;
3721 mlxsw_sp->router = router;
3722 router->mlxsw_sp = mlxsw_sp;
3723
3724 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
b45f64d1
JP
3725 err = __mlxsw_sp_router_init(mlxsw_sp);
3726 if (err)
9011b677 3727 goto err_router_init;
b45f64d1 3728
348b8fc3
IS
3729 err = mlxsw_sp_rifs_init(mlxsw_sp);
3730 if (err)
3731 goto err_rifs_init;
3732
9011b677 3733 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
c53b8e1b
IS
3734 &mlxsw_sp_nexthop_ht_params);
3735 if (err)
3736 goto err_nexthop_ht_init;
3737
9011b677 3738 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
e9ad5e7d
IS
3739 &mlxsw_sp_nexthop_group_ht_params);
3740 if (err)
3741 goto err_nexthop_group_ht_init;
3742
8494ab06
IS
3743 err = mlxsw_sp_lpm_init(mlxsw_sp);
3744 if (err)
3745 goto err_lpm_init;
3746
b45f64d1
JP
3747 err = mlxsw_sp_vrs_init(mlxsw_sp);
3748 if (err)
3749 goto err_vrs_init;
3750
8c9583a8 3751 err = mlxsw_sp_neigh_init(mlxsw_sp);
b45f64d1
JP
3752 if (err)
3753 goto err_neigh_init;
3754
7e39d115
IS
3755 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
3756 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
c3852ef7
IS
3757 mlxsw_sp_router_fib_dump_flush);
3758 if (err)
3759 goto err_register_fib_notifier;
3760
b45f64d1
JP
3761 return 0;
3762
c3852ef7
IS
3763err_register_fib_notifier:
3764 mlxsw_sp_neigh_fini(mlxsw_sp);
b45f64d1
JP
3765err_neigh_init:
3766 mlxsw_sp_vrs_fini(mlxsw_sp);
3767err_vrs_init:
8494ab06
IS
3768 mlxsw_sp_lpm_fini(mlxsw_sp);
3769err_lpm_init:
9011b677 3770 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
e9ad5e7d 3771err_nexthop_group_ht_init:
9011b677 3772 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
c53b8e1b 3773err_nexthop_ht_init:
348b8fc3
IS
3774 mlxsw_sp_rifs_fini(mlxsw_sp);
3775err_rifs_init:
b45f64d1 3776 __mlxsw_sp_router_fini(mlxsw_sp);
9011b677
IS
3777err_router_init:
3778 kfree(mlxsw_sp->router);
b45f64d1
JP
3779 return err;
3780}
3781
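/* Tear-down mirrors mlxsw_sp_router_init() in reverse, starting by
 * unregistering the FIB notifier so that no further FIB events are
 * delivered while the router is being dismantled.
 */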
3782void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3783{
7e39d115 3784 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
b45f64d1
JP
3785 mlxsw_sp_neigh_fini(mlxsw_sp);
3786 mlxsw_sp_vrs_fini(mlxsw_sp);
8494ab06 3787 mlxsw_sp_lpm_fini(mlxsw_sp);
9011b677
IS
3788 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
3789 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
348b8fc3 3790 mlxsw_sp_rifs_fini(mlxsw_sp);
b45f64d1 3791 __mlxsw_sp_router_fini(mlxsw_sp);
9011b677 3792 kfree(mlxsw_sp->router);
b45f64d1 3793}