]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
Merge tag 'powerpc-4.13-8' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_router.c
CommitLineData
464dce18
IS
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
c723c735 6 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
464dce18
IS
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
5e9c16cc
JP
39#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
c723c735 42#include <linux/notifier.h>
df6dd79b 43#include <linux/inetdevice.h>
9db032bb 44#include <linux/netdevice.h>
03ea01e9 45#include <linux/if_bridge.h>
c723c735 46#include <net/netevent.h>
6cf3c971
JP
47#include <net/neighbour.h>
48#include <net/arp.h>
b45f64d1 49#include <net/ip_fib.h>
5d7bfd14 50#include <net/fib_rules.h>
57837885 51#include <net/l3mdev.h>
464dce18
IS
52
53#include "spectrum.h"
54#include "core.h"
55#include "reg.h"
e0c0afd8
AS
56#include "spectrum_cnt.h"
57#include "spectrum_dpipe.h"
58#include "spectrum_router.h"
464dce18 59
9011b677
IS
60struct mlxsw_sp_vr;
61struct mlxsw_sp_lpm_tree;
e4f3c1c1 62struct mlxsw_sp_rif_ops;
9011b677
IS
63
64struct mlxsw_sp_router {
65 struct mlxsw_sp *mlxsw_sp;
5f9efffb 66 struct mlxsw_sp_rif **rifs;
9011b677
IS
67 struct mlxsw_sp_vr *vrs;
68 struct rhashtable neigh_ht;
69 struct rhashtable nexthop_group_ht;
70 struct rhashtable nexthop_ht;
71 struct {
72 struct mlxsw_sp_lpm_tree *trees;
73 unsigned int tree_count;
74 } lpm;
75 struct {
76 struct delayed_work dw;
77 unsigned long interval; /* ms */
78 } neighs_update;
79 struct delayed_work nexthop_probe_dw;
80#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
81 struct list_head nexthop_neighs_list;
82 bool aborted;
7e39d115 83 struct notifier_block fib_nb;
e4f3c1c1 84 const struct mlxsw_sp_rif_ops **rif_ops_arr;
9011b677
IS
85};
86
4724ba56
IS
87struct mlxsw_sp_rif {
88 struct list_head nexthop_list;
89 struct list_head neigh_list;
90 struct net_device *dev;
a1107487 91 struct mlxsw_sp_fid *fid;
4724ba56
IS
92 unsigned char addr[ETH_ALEN];
93 int mtu;
bf95233e 94 u16 rif_index;
6913229e 95 u16 vr_id;
e4f3c1c1
IS
96 const struct mlxsw_sp_rif_ops *ops;
97 struct mlxsw_sp *mlxsw_sp;
98
e0c0afd8
AS
99 unsigned int counter_ingress;
100 bool counter_ingress_valid;
101 unsigned int counter_egress;
102 bool counter_egress_valid;
4724ba56
IS
103};
104
e4f3c1c1
IS
105struct mlxsw_sp_rif_params {
106 struct net_device *dev;
107 union {
108 u16 system_port;
109 u16 lag_id;
110 };
111 u16 vid;
112 bool lag;
113};
114
4d93ceeb
IS
115struct mlxsw_sp_rif_subport {
116 struct mlxsw_sp_rif common;
117 union {
118 u16 system_port;
119 u16 lag_id;
120 };
121 u16 vid;
122 bool lag;
123};
124
e4f3c1c1
IS
125struct mlxsw_sp_rif_ops {
126 enum mlxsw_sp_rif_type type;
127 size_t rif_size;
128
129 void (*setup)(struct mlxsw_sp_rif *rif,
130 const struct mlxsw_sp_rif_params *params);
131 int (*configure)(struct mlxsw_sp_rif *rif);
132 void (*deconfigure)(struct mlxsw_sp_rif *rif);
133 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
134};
135
e0c0afd8
AS
136static unsigned int *
137mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
138 enum mlxsw_sp_rif_counter_dir dir)
139{
140 switch (dir) {
141 case MLXSW_SP_RIF_COUNTER_EGRESS:
142 return &rif->counter_egress;
143 case MLXSW_SP_RIF_COUNTER_INGRESS:
144 return &rif->counter_ingress;
145 }
146 return NULL;
147}
148
149static bool
150mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
151 enum mlxsw_sp_rif_counter_dir dir)
152{
153 switch (dir) {
154 case MLXSW_SP_RIF_COUNTER_EGRESS:
155 return rif->counter_egress_valid;
156 case MLXSW_SP_RIF_COUNTER_INGRESS:
157 return rif->counter_ingress_valid;
158 }
159 return false;
160}
161
162static void
163mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
164 enum mlxsw_sp_rif_counter_dir dir,
165 bool valid)
166{
167 switch (dir) {
168 case MLXSW_SP_RIF_COUNTER_EGRESS:
169 rif->counter_egress_valid = valid;
170 break;
171 case MLXSW_SP_RIF_COUNTER_INGRESS:
172 rif->counter_ingress_valid = valid;
173 break;
174 }
175}
176
177static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
178 unsigned int counter_index, bool enable,
179 enum mlxsw_sp_rif_counter_dir dir)
180{
181 char ritr_pl[MLXSW_REG_RITR_LEN];
182 bool is_egress = false;
183 int err;
184
185 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
186 is_egress = true;
187 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
188 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
189 if (err)
190 return err;
191
192 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
193 is_egress);
194 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
195}
196
197int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
198 struct mlxsw_sp_rif *rif,
199 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
200{
201 char ricnt_pl[MLXSW_REG_RICNT_LEN];
202 unsigned int *p_counter_index;
203 bool valid;
204 int err;
205
206 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
207 if (!valid)
208 return -EINVAL;
209
210 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
211 if (!p_counter_index)
212 return -EINVAL;
213 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
214 MLXSW_REG_RICNT_OPCODE_NOP);
215 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
216 if (err)
217 return err;
218 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
219 return 0;
220}
221
222static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
223 unsigned int counter_index)
224{
225 char ricnt_pl[MLXSW_REG_RICNT_LEN];
226
227 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
228 MLXSW_REG_RICNT_OPCODE_CLEAR);
229 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
230}
231
232int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
233 struct mlxsw_sp_rif *rif,
234 enum mlxsw_sp_rif_counter_dir dir)
235{
236 unsigned int *p_counter_index;
237 int err;
238
239 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
240 if (!p_counter_index)
241 return -EINVAL;
242 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
243 p_counter_index);
244 if (err)
245 return err;
246
247 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
248 if (err)
249 goto err_counter_clear;
250
251 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
252 *p_counter_index, true, dir);
253 if (err)
254 goto err_counter_edit;
255 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
256 return 0;
257
258err_counter_edit:
259err_counter_clear:
260 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
261 *p_counter_index);
262 return err;
263}
264
265void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
266 struct mlxsw_sp_rif *rif,
267 enum mlxsw_sp_rif_counter_dir dir)
268{
269 unsigned int *p_counter_index;
270
6b1206bb
AS
271 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
272 return;
273
e0c0afd8
AS
274 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
275 if (WARN_ON(!p_counter_index))
276 return;
277 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
278 *p_counter_index, false, dir);
279 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
280 *p_counter_index);
281 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
282}
283
e4f3c1c1
IS
284static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
285{
286 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
287 struct devlink *devlink;
288
289 devlink = priv_to_devlink(mlxsw_sp->core);
290 if (!devlink_dpipe_table_counter_enabled(devlink,
291 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
292 return;
293 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
294}
295
296static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
297{
298 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
299
300 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
301}
302
4724ba56
IS
303static struct mlxsw_sp_rif *
304mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
305 const struct net_device *dev);
306
9011b677
IS
307#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
308
309struct mlxsw_sp_prefix_usage {
310 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
311};
312
53342023
JP
313#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
314 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
315
6b75c480
JP
316static bool
317mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
318 struct mlxsw_sp_prefix_usage *prefix_usage2)
319{
320 unsigned char prefix;
321
322 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
323 if (!test_bit(prefix, prefix_usage2->b))
324 return false;
325 }
326 return true;
327}
328
53342023
JP
329static bool
330mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
331 struct mlxsw_sp_prefix_usage *prefix_usage2)
332{
333 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
334}
335
6b75c480
JP
336static bool
337mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
338{
339 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
340
341 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
342}
343
344static void
345mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
346 struct mlxsw_sp_prefix_usage *prefix_usage2)
347{
348 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
349}
350
5e9c16cc
JP
351static void
352mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
353 unsigned char prefix_len)
354{
355 set_bit(prefix_len, prefix_usage->b);
356}
357
358static void
359mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
360 unsigned char prefix_len)
361{
362 clear_bit(prefix_len, prefix_usage->b);
363}
364
365struct mlxsw_sp_fib_key {
366 unsigned char addr[sizeof(struct in6_addr)];
367 unsigned char prefix_len;
368};
369
61c503f9
JP
370enum mlxsw_sp_fib_entry_type {
371 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
372 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
373 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
374};
375
a7ff87ac 376struct mlxsw_sp_nexthop_group;
9011b677 377struct mlxsw_sp_fib;
a7ff87ac 378
9aecce1c
IS
379struct mlxsw_sp_fib_node {
380 struct list_head entry_list;
b45f64d1 381 struct list_head list;
9aecce1c 382 struct rhash_head ht_node;
76610ebb 383 struct mlxsw_sp_fib *fib;
5e9c16cc 384 struct mlxsw_sp_fib_key key;
9aecce1c
IS
385};
386
387struct mlxsw_sp_fib_entry_params {
388 u32 tb_id;
389 u32 prio;
390 u8 tos;
391 u8 type;
392};
393
394struct mlxsw_sp_fib_entry {
395 struct list_head list;
396 struct mlxsw_sp_fib_node *fib_node;
61c503f9 397 enum mlxsw_sp_fib_entry_type type;
a7ff87ac
JP
398 struct list_head nexthop_group_node;
399 struct mlxsw_sp_nexthop_group *nh_group;
9aecce1c 400 struct mlxsw_sp_fib_entry_params params;
013b20f9 401 bool offloaded;
5e9c16cc
JP
402};
403
9011b677
IS
404enum mlxsw_sp_l3proto {
405 MLXSW_SP_L3_PROTO_IPV4,
406 MLXSW_SP_L3_PROTO_IPV6,
407};
408
409struct mlxsw_sp_lpm_tree {
410 u8 id; /* tree ID */
411 unsigned int ref_count;
412 enum mlxsw_sp_l3proto proto;
413 struct mlxsw_sp_prefix_usage prefix_usage;
414};
415
5e9c16cc
JP
416struct mlxsw_sp_fib {
417 struct rhashtable ht;
9aecce1c 418 struct list_head node_list;
76610ebb
IS
419 struct mlxsw_sp_vr *vr;
420 struct mlxsw_sp_lpm_tree *lpm_tree;
5e9c16cc
JP
421 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
422 struct mlxsw_sp_prefix_usage prefix_usage;
76610ebb 423 enum mlxsw_sp_l3proto proto;
5e9c16cc
JP
424};
425
9011b677
IS
426struct mlxsw_sp_vr {
427 u16 id; /* virtual router ID */
428 u32 tb_id; /* kernel fib table id */
429 unsigned int rif_count;
430 struct mlxsw_sp_fib *fib4;
431};
432
9aecce1c 433static const struct rhashtable_params mlxsw_sp_fib_ht_params;
5e9c16cc 434
76610ebb
IS
435static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
436 enum mlxsw_sp_l3proto proto)
5e9c16cc
JP
437{
438 struct mlxsw_sp_fib *fib;
439 int err;
440
441 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
442 if (!fib)
443 return ERR_PTR(-ENOMEM);
444 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
445 if (err)
446 goto err_rhashtable_init;
9aecce1c 447 INIT_LIST_HEAD(&fib->node_list);
76610ebb
IS
448 fib->proto = proto;
449 fib->vr = vr;
5e9c16cc
JP
450 return fib;
451
452err_rhashtable_init:
453 kfree(fib);
454 return ERR_PTR(err);
455}
456
457static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
458{
9aecce1c 459 WARN_ON(!list_empty(&fib->node_list));
76610ebb 460 WARN_ON(fib->lpm_tree);
5e9c16cc
JP
461 rhashtable_destroy(&fib->ht);
462 kfree(fib);
463}
464
53342023 465static struct mlxsw_sp_lpm_tree *
382dbb40 466mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
53342023
JP
467{
468 static struct mlxsw_sp_lpm_tree *lpm_tree;
469 int i;
470
9011b677
IS
471 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
472 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
382dbb40
IS
473 if (lpm_tree->ref_count == 0)
474 return lpm_tree;
53342023
JP
475 }
476 return NULL;
477}
478
479static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
480 struct mlxsw_sp_lpm_tree *lpm_tree)
481{
482 char ralta_pl[MLXSW_REG_RALTA_LEN];
483
1a9234e6
IS
484 mlxsw_reg_ralta_pack(ralta_pl, true,
485 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
486 lpm_tree->id);
53342023
JP
487 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
488}
489
490static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
491 struct mlxsw_sp_lpm_tree *lpm_tree)
492{
493 char ralta_pl[MLXSW_REG_RALTA_LEN];
494
1a9234e6
IS
495 mlxsw_reg_ralta_pack(ralta_pl, false,
496 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
497 lpm_tree->id);
53342023
JP
498 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
499}
500
501static int
502mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
503 struct mlxsw_sp_prefix_usage *prefix_usage,
504 struct mlxsw_sp_lpm_tree *lpm_tree)
505{
506 char ralst_pl[MLXSW_REG_RALST_LEN];
507 u8 root_bin = 0;
508 u8 prefix;
509 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
510
511 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
512 root_bin = prefix;
513
514 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
515 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
516 if (prefix == 0)
517 continue;
518 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
519 MLXSW_REG_RALST_BIN_NO_CHILD);
520 last_prefix = prefix;
521 }
522 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
523}
524
525static struct mlxsw_sp_lpm_tree *
526mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
527 struct mlxsw_sp_prefix_usage *prefix_usage,
382dbb40 528 enum mlxsw_sp_l3proto proto)
53342023
JP
529{
530 struct mlxsw_sp_lpm_tree *lpm_tree;
531 int err;
532
382dbb40 533 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
53342023
JP
534 if (!lpm_tree)
535 return ERR_PTR(-EBUSY);
536 lpm_tree->proto = proto;
537 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
538 if (err)
539 return ERR_PTR(err);
540
541 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
542 lpm_tree);
543 if (err)
544 goto err_left_struct_set;
2083d367
JP
545 memcpy(&lpm_tree->prefix_usage, prefix_usage,
546 sizeof(lpm_tree->prefix_usage));
53342023
JP
547 return lpm_tree;
548
549err_left_struct_set:
550 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
551 return ERR_PTR(err);
552}
553
554static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
555 struct mlxsw_sp_lpm_tree *lpm_tree)
556{
557 return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
558}
559
560static struct mlxsw_sp_lpm_tree *
561mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
562 struct mlxsw_sp_prefix_usage *prefix_usage,
382dbb40 563 enum mlxsw_sp_l3proto proto)
53342023
JP
564{
565 struct mlxsw_sp_lpm_tree *lpm_tree;
566 int i;
567
9011b677
IS
568 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
569 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
8b99becd
JP
570 if (lpm_tree->ref_count != 0 &&
571 lpm_tree->proto == proto &&
53342023
JP
572 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
573 prefix_usage))
574 goto inc_ref_count;
575 }
576 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
382dbb40 577 proto);
53342023
JP
578 if (IS_ERR(lpm_tree))
579 return lpm_tree;
580
581inc_ref_count:
582 lpm_tree->ref_count++;
583 return lpm_tree;
584}
585
586static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
587 struct mlxsw_sp_lpm_tree *lpm_tree)
588{
589 if (--lpm_tree->ref_count == 0)
590 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
591 return 0;
592}
593
d7a60306 594#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
8494ab06
IS
595
596static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
53342023
JP
597{
598 struct mlxsw_sp_lpm_tree *lpm_tree;
8494ab06 599 u64 max_trees;
53342023
JP
600 int i;
601
8494ab06
IS
602 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
603 return -EIO;
604
605 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
9011b677
IS
606 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
607 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
8494ab06
IS
608 sizeof(struct mlxsw_sp_lpm_tree),
609 GFP_KERNEL);
9011b677 610 if (!mlxsw_sp->router->lpm.trees)
8494ab06
IS
611 return -ENOMEM;
612
9011b677
IS
613 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
614 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
53342023
JP
615 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
616 }
8494ab06
IS
617
618 return 0;
619}
620
621static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
622{
9011b677 623 kfree(mlxsw_sp->router->lpm.trees);
53342023
JP
624}
625
76610ebb
IS
626static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
627{
628 return !!vr->fib4;
629}
630
6b75c480
JP
631static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
632{
633 struct mlxsw_sp_vr *vr;
634 int i;
635
c1a38311 636 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 637 vr = &mlxsw_sp->router->vrs[i];
76610ebb 638 if (!mlxsw_sp_vr_is_used(vr))
6b75c480
JP
639 return vr;
640 }
641 return NULL;
642}
643
644static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
76610ebb 645 const struct mlxsw_sp_fib *fib)
6b75c480
JP
646{
647 char raltb_pl[MLXSW_REG_RALTB_LEN];
648
76610ebb
IS
649 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
650 (enum mlxsw_reg_ralxx_protocol) fib->proto,
651 fib->lpm_tree->id);
6b75c480
JP
652 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
653}
654
655static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
76610ebb 656 const struct mlxsw_sp_fib *fib)
6b75c480
JP
657{
658 char raltb_pl[MLXSW_REG_RALTB_LEN];
659
660 /* Bind to tree 0 which is default */
76610ebb
IS
661 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
662 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
6b75c480
JP
663 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
664}
665
666static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
667{
668 /* For our purpose, squash main and local table into one */
669 if (tb_id == RT_TABLE_LOCAL)
670 tb_id = RT_TABLE_MAIN;
671 return tb_id;
672}
673
674static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
76610ebb 675 u32 tb_id)
6b75c480
JP
676{
677 struct mlxsw_sp_vr *vr;
678 int i;
679
680 tb_id = mlxsw_sp_fix_tb_id(tb_id);
9497c042 681
c1a38311 682 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 683 vr = &mlxsw_sp->router->vrs[i];
76610ebb 684 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
6b75c480
JP
685 return vr;
686 }
687 return NULL;
688}
689
76610ebb
IS
690static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
691 enum mlxsw_sp_l3proto proto)
692{
693 switch (proto) {
694 case MLXSW_SP_L3_PROTO_IPV4:
695 return vr->fib4;
696 case MLXSW_SP_L3_PROTO_IPV6:
697 BUG_ON(1);
698 }
699 return NULL;
700}
701
6b75c480 702static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
76610ebb 703 u32 tb_id)
6b75c480 704{
6b75c480 705 struct mlxsw_sp_vr *vr;
6b75c480
JP
706
707 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
708 if (!vr)
709 return ERR_PTR(-EBUSY);
76610ebb
IS
710 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
711 if (IS_ERR(vr->fib4))
712 return ERR_CAST(vr->fib4);
6b75c480 713 vr->tb_id = tb_id;
6b75c480 714 return vr;
6b75c480
JP
715}
716
76610ebb 717static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
6b75c480 718{
76610ebb
IS
719 mlxsw_sp_fib_destroy(vr->fib4);
720 vr->fib4 = NULL;
6b75c480
JP
721}
722
723static int
76610ebb 724mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
6b75c480
JP
725 struct mlxsw_sp_prefix_usage *req_prefix_usage)
726{
76610ebb 727 struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
f7df4923
IS
728 struct mlxsw_sp_lpm_tree *new_tree;
729 int err;
6b75c480 730
f7df4923 731 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
6b75c480
JP
732 return 0;
733
f7df4923 734 new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
76610ebb 735 fib->proto);
f7df4923 736 if (IS_ERR(new_tree)) {
6b75c480
JP
737 /* We failed to get a tree according to the required
738 * prefix usage. However, the current tree might be still good
739 * for us if our requirement is subset of the prefixes used
740 * in the tree.
741 */
742 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
f7df4923 743 &lpm_tree->prefix_usage))
6b75c480 744 return 0;
f7df4923 745 return PTR_ERR(new_tree);
6b75c480
JP
746 }
747
f7df4923 748 /* Prevent packet loss by overwriting existing binding */
76610ebb
IS
749 fib->lpm_tree = new_tree;
750 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
f7df4923
IS
751 if (err)
752 goto err_tree_bind;
753 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
754
755 return 0;
756
757err_tree_bind:
76610ebb 758 fib->lpm_tree = lpm_tree;
f7df4923
IS
759 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
760 return err;
6b75c480
JP
761}
762
76610ebb 763static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
6b75c480
JP
764{
765 struct mlxsw_sp_vr *vr;
6b75c480
JP
766
767 tb_id = mlxsw_sp_fix_tb_id(tb_id);
76610ebb
IS
768 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
769 if (!vr)
770 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
6b75c480
JP
771 return vr;
772}
773
76610ebb 774static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
6b75c480 775{
6913229e 776 if (!vr->rif_count && list_empty(&vr->fib4->node_list))
76610ebb 777 mlxsw_sp_vr_destroy(vr);
6b75c480
JP
778}
779
9497c042 780static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
6b75c480
JP
781{
782 struct mlxsw_sp_vr *vr;
c1a38311 783 u64 max_vrs;
6b75c480
JP
784 int i;
785
c1a38311 786 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
9497c042
NF
787 return -EIO;
788
c1a38311 789 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
9011b677
IS
790 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
791 GFP_KERNEL);
792 if (!mlxsw_sp->router->vrs)
9497c042
NF
793 return -ENOMEM;
794
c1a38311 795 for (i = 0; i < max_vrs; i++) {
9011b677 796 vr = &mlxsw_sp->router->vrs[i];
6b75c480
JP
797 vr->id = i;
798 }
9497c042
NF
799
800 return 0;
801}
802
ac571de9
IS
803static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
804
9497c042
NF
805static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
806{
3057224e
IS
807 /* At this stage we're guaranteed not to have new incoming
808 * FIB notifications and the work queue is free from FIBs
809 * sitting on top of mlxsw netdevs. However, we can still
810 * have other FIBs queued. Flush the queue before flushing
811 * the device's tables. No need for locks, as we're the only
812 * writer.
813 */
814 mlxsw_core_flush_owq();
ac571de9 815 mlxsw_sp_router_fib_flush(mlxsw_sp);
9011b677 816 kfree(mlxsw_sp->router->vrs);
6b75c480
JP
817}
818
6cf3c971 819struct mlxsw_sp_neigh_key {
33b1341c 820 struct neighbour *n;
6cf3c971
JP
821};
822
823struct mlxsw_sp_neigh_entry {
9665b745 824 struct list_head rif_list_node;
6cf3c971
JP
825 struct rhash_head ht_node;
826 struct mlxsw_sp_neigh_key key;
827 u16 rif;
5c8802f1 828 bool connected;
a6bf9e93 829 unsigned char ha[ETH_ALEN];
a7ff87ac
JP
830 struct list_head nexthop_list; /* list of nexthops using
831 * this neigh entry
832 */
b2157149 833 struct list_head nexthop_neighs_list_node;
6cf3c971
JP
834};
835
836static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
837 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
838 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
839 .key_len = sizeof(struct mlxsw_sp_neigh_key),
840};
841
6cf3c971 842static struct mlxsw_sp_neigh_entry *
5c8802f1
IS
843mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
844 u16 rif)
6cf3c971
JP
845{
846 struct mlxsw_sp_neigh_entry *neigh_entry;
847
5c8802f1 848 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
6cf3c971
JP
849 if (!neigh_entry)
850 return NULL;
5c8802f1 851
33b1341c 852 neigh_entry->key.n = n;
6cf3c971 853 neigh_entry->rif = rif;
a7ff87ac 854 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
5c8802f1 855
6cf3c971
JP
856 return neigh_entry;
857}
858
5c8802f1 859static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
6cf3c971
JP
860{
861 kfree(neigh_entry);
862}
863
5c8802f1
IS
864static int
865mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
866 struct mlxsw_sp_neigh_entry *neigh_entry)
6cf3c971 867{
9011b677 868 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
5c8802f1
IS
869 &neigh_entry->ht_node,
870 mlxsw_sp_neigh_ht_params);
871}
6cf3c971 872
5c8802f1
IS
873static void
874mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
875 struct mlxsw_sp_neigh_entry *neigh_entry)
876{
9011b677 877 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
5c8802f1
IS
878 &neigh_entry->ht_node,
879 mlxsw_sp_neigh_ht_params);
6cf3c971
JP
880}
881
5c8802f1
IS
882static struct mlxsw_sp_neigh_entry *
883mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
6cf3c971 884{
6cf3c971 885 struct mlxsw_sp_neigh_entry *neigh_entry;
bf95233e 886 struct mlxsw_sp_rif *rif;
6cf3c971
JP
887 int err;
888
bf95233e
AS
889 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
890 if (!rif)
5c8802f1 891 return ERR_PTR(-EINVAL);
6cf3c971 892
bf95233e 893 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
6cf3c971 894 if (!neigh_entry)
5c8802f1
IS
895 return ERR_PTR(-ENOMEM);
896
6cf3c971
JP
897 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
898 if (err)
899 goto err_neigh_entry_insert;
5c8802f1 900
bf95233e 901 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
9665b745 902
5c8802f1 903 return neigh_entry;
6cf3c971
JP
904
905err_neigh_entry_insert:
5c8802f1
IS
906 mlxsw_sp_neigh_entry_free(neigh_entry);
907 return ERR_PTR(err);
6cf3c971
JP
908}
909
5c8802f1
IS
910static void
911mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
912 struct mlxsw_sp_neigh_entry *neigh_entry)
6cf3c971 913{
9665b745 914 list_del(&neigh_entry->rif_list_node);
5c8802f1
IS
915 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
916 mlxsw_sp_neigh_entry_free(neigh_entry);
917}
6cf3c971 918
5c8802f1
IS
919static struct mlxsw_sp_neigh_entry *
920mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
921{
922 struct mlxsw_sp_neigh_key key;
6cf3c971 923
5c8802f1 924 key.n = n;
9011b677 925 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
5c8802f1 926 &key, mlxsw_sp_neigh_ht_params);
6cf3c971
JP
927}
928
c723c735
YG
929static void
930mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
931{
932 unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
933
9011b677 934 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
c723c735
YG
935}
936
937static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
938 char *rauhtd_pl,
939 int ent_index)
940{
941 struct net_device *dev;
942 struct neighbour *n;
943 __be32 dipn;
944 u32 dip;
945 u16 rif;
946
947 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
948
5f9efffb 949 if (!mlxsw_sp->router->rifs[rif]) {
c723c735
YG
950 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
951 return;
952 }
953
954 dipn = htonl(dip);
5f9efffb 955 dev = mlxsw_sp->router->rifs[rif]->dev;
c723c735
YG
956 n = neigh_lookup(&arp_tbl, &dipn, dev);
957 if (!n) {
958 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
959 &dip);
960 return;
961 }
962
963 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
964 neigh_event_send(n, NULL);
965 neigh_release(n);
966}
967
968static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
969 char *rauhtd_pl,
970 int rec_index)
971{
972 u8 num_entries;
973 int i;
974
975 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
976 rec_index);
977 /* Hardware starts counting at 0, so add 1. */
978 num_entries++;
979
980 /* Each record consists of several neighbour entries. */
981 for (i = 0; i < num_entries; i++) {
982 int ent_index;
983
984 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
985 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
986 ent_index);
987 }
988
989}
990
991static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
992 char *rauhtd_pl, int rec_index)
993{
994 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
995 case MLXSW_REG_RAUHTD_TYPE_IPV4:
996 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
997 rec_index);
998 break;
999 case MLXSW_REG_RAUHTD_TYPE_IPV6:
1000 WARN_ON_ONCE(1);
1001 break;
1002 }
1003}
1004
42cdb338
AS
1005static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1006{
1007 u8 num_rec, last_rec_index, num_entries;
1008
1009 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1010 last_rec_index = num_rec - 1;
1011
1012 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1013 return false;
1014 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1015 MLXSW_REG_RAUHTD_TYPE_IPV6)
1016 return true;
1017
1018 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1019 last_rec_index);
1020 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1021 return true;
1022 return false;
1023}
1024
b2157149 1025static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
c723c735 1026{
c723c735
YG
1027 char *rauhtd_pl;
1028 u8 num_rec;
1029 int i, err;
1030
1031 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1032 if (!rauhtd_pl)
b2157149 1033 return -ENOMEM;
c723c735
YG
1034
1035 /* Make sure the neighbour's netdev isn't removed in the
1036 * process.
1037 */
1038 rtnl_lock();
1039 do {
1040 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
1041 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1042 rauhtd_pl);
1043 if (err) {
1044 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
1045 break;
1046 }
1047 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1048 for (i = 0; i < num_rec; i++)
1049 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1050 i);
42cdb338 1051 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
c723c735
YG
1052 rtnl_unlock();
1053
1054 kfree(rauhtd_pl);
b2157149
YG
1055 return err;
1056}
1057
1058static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1059{
1060 struct mlxsw_sp_neigh_entry *neigh_entry;
1061
1062 /* Take RTNL mutex here to prevent lists from changes */
1063 rtnl_lock();
9011b677 1064 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
8a0b7275 1065 nexthop_neighs_list_node)
b2157149
YG
1066 /* If this neigh have nexthops, make the kernel think this neigh
1067 * is active regardless of the traffic.
1068 */
8a0b7275 1069 neigh_event_send(neigh_entry->key.n, NULL);
b2157149
YG
1070 rtnl_unlock();
1071}
1072
1073static void
1074mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1075{
9011b677 1076 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
b2157149 1077
9011b677 1078 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
b2157149
YG
1079 msecs_to_jiffies(interval));
1080}
1081
1082static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1083{
9011b677 1084 struct mlxsw_sp_router *router;
b2157149
YG
1085 int err;
1086
9011b677
IS
1087 router = container_of(work, struct mlxsw_sp_router,
1088 neighs_update.dw.work);
1089 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
b2157149 1090 if (err)
9011b677 1091 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
b2157149 1092
9011b677 1093 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
b2157149 1094
9011b677 1095 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
c723c735
YG
1096}
1097
0b2361d9
YG
1098static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1099{
1100 struct mlxsw_sp_neigh_entry *neigh_entry;
9011b677 1101 struct mlxsw_sp_router *router;
0b2361d9 1102
9011b677
IS
1103 router = container_of(work, struct mlxsw_sp_router,
1104 nexthop_probe_dw.work);
0b2361d9
YG
1105 /* Iterate over nexthop neighbours, find those who are unresolved and
1106 * send arp on them. This solves the chicken-egg problem when
1107 * the nexthop wouldn't get offloaded until the neighbor is resolved
1108 * but it wouldn't get resolved ever in case traffic is flowing in HW
1109 * using different nexthop.
1110 *
1111 * Take RTNL mutex here to prevent lists from changes.
1112 */
1113 rtnl_lock();
9011b677 1114 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
8a0b7275 1115 nexthop_neighs_list_node)
01b1aa35 1116 if (!neigh_entry->connected)
33b1341c 1117 neigh_event_send(neigh_entry->key.n, NULL);
0b2361d9
YG
1118 rtnl_unlock();
1119
9011b677 1120 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
0b2361d9
YG
1121 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1122}
1123
a7ff87ac
JP
1124static void
1125mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1126 struct mlxsw_sp_neigh_entry *neigh_entry,
1127 bool removing);
1128
5c8802f1
IS
1129static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
1130{
1131 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1132 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1133}
1134
1135static void
1136mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1137 struct mlxsw_sp_neigh_entry *neigh_entry,
1138 enum mlxsw_reg_rauht_op op)
a6bf9e93 1139{
33b1341c 1140 struct neighbour *n = neigh_entry->key.n;
5c8802f1 1141 u32 dip = ntohl(*((__be32 *) n->primary_key));
a6bf9e93 1142 char rauht_pl[MLXSW_REG_RAUHT_LEN];
5c8802f1
IS
1143
1144 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1145 dip);
1146 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1147}
1148
1149static void
1150mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1151 struct mlxsw_sp_neigh_entry *neigh_entry,
1152 bool adding)
1153{
1154 if (!adding && !neigh_entry->connected)
1155 return;
1156 neigh_entry->connected = adding;
1157 if (neigh_entry->key.n->tbl == &arp_tbl)
1158 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
1159 mlxsw_sp_rauht_op(adding));
1160 else
1161 WARN_ON_ONCE(1);
1162}
1163
1164struct mlxsw_sp_neigh_event_work {
1165 struct work_struct work;
1166 struct mlxsw_sp *mlxsw_sp;
1167 struct neighbour *n;
1168};
1169
1170static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
1171{
1172 struct mlxsw_sp_neigh_event_work *neigh_work =
1173 container_of(work, struct mlxsw_sp_neigh_event_work, work);
1174 struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
1175 struct mlxsw_sp_neigh_entry *neigh_entry;
1176 struct neighbour *n = neigh_work->n;
1177 unsigned char ha[ETH_ALEN];
a6bf9e93 1178 bool entry_connected;
93a87e5e 1179 u8 nud_state, dead;
a6bf9e93 1180
5c8802f1
IS
1181 /* If these parameters are changed after we release the lock,
1182 * then we are guaranteed to receive another event letting us
1183 * know about it.
1184 */
a6bf9e93 1185 read_lock_bh(&n->lock);
5c8802f1 1186 memcpy(ha, n->ha, ETH_ALEN);
a6bf9e93 1187 nud_state = n->nud_state;
93a87e5e 1188 dead = n->dead;
a6bf9e93
YG
1189 read_unlock_bh(&n->lock);
1190
5c8802f1 1191 rtnl_lock();
93a87e5e 1192 entry_connected = nud_state & NUD_VALID && !dead;
5c8802f1
IS
1193 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1194 if (!entry_connected && !neigh_entry)
1195 goto out;
1196 if (!neigh_entry) {
1197 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1198 if (IS_ERR(neigh_entry))
1199 goto out;
a6bf9e93
YG
1200 }
1201
5c8802f1
IS
1202 memcpy(neigh_entry->ha, ha, ETH_ALEN);
1203 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
1204 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
1205
1206 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1207 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1208
1209out:
1210 rtnl_unlock();
a6bf9e93 1211 neigh_release(n);
5c8802f1 1212 kfree(neigh_work);
a6bf9e93
YG
1213}
1214
e7322638
JP
1215int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
1216 unsigned long event, void *ptr)
c723c735 1217{
5c8802f1 1218 struct mlxsw_sp_neigh_event_work *neigh_work;
c723c735
YG
1219 struct mlxsw_sp_port *mlxsw_sp_port;
1220 struct mlxsw_sp *mlxsw_sp;
1221 unsigned long interval;
1222 struct neigh_parms *p;
a6bf9e93 1223 struct neighbour *n;
c723c735
YG
1224
1225 switch (event) {
1226 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
1227 p = ptr;
1228
1229 /* We don't care about changes in the default table. */
1230 if (!p->dev || p->tbl != &arp_tbl)
1231 return NOTIFY_DONE;
1232
1233 /* We are in atomic context and can't take RTNL mutex,
1234 * so use RCU variant to walk the device chain.
1235 */
1236 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
1237 if (!mlxsw_sp_port)
1238 return NOTIFY_DONE;
1239
1240 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1241 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
9011b677 1242 mlxsw_sp->router->neighs_update.interval = interval;
c723c735
YG
1243
1244 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1245 break;
a6bf9e93
YG
1246 case NETEVENT_NEIGH_UPDATE:
1247 n = ptr;
a6bf9e93
YG
1248
1249 if (n->tbl != &arp_tbl)
1250 return NOTIFY_DONE;
1251
5c8802f1 1252 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
a6bf9e93
YG
1253 if (!mlxsw_sp_port)
1254 return NOTIFY_DONE;
1255
5c8802f1
IS
1256 neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
1257 if (!neigh_work) {
a6bf9e93 1258 mlxsw_sp_port_dev_put(mlxsw_sp_port);
5c8802f1 1259 return NOTIFY_BAD;
a6bf9e93 1260 }
5c8802f1
IS
1261
1262 INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
1263 neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1264 neigh_work->n = n;
a6bf9e93
YG
1265
1266 /* Take a reference to ensure the neighbour won't be
1267 * destructed until we drop the reference in delayed
1268 * work.
1269 */
1270 neigh_clone(n);
5c8802f1
IS
1271 mlxsw_core_schedule_work(&neigh_work->work);
1272 mlxsw_sp_port_dev_put(mlxsw_sp_port);
a6bf9e93 1273 break;
c723c735
YG
1274 }
1275
1276 return NOTIFY_DONE;
1277}
1278
6cf3c971
JP
1279static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1280{
c723c735
YG
1281 int err;
1282
9011b677 1283 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
c723c735
YG
1284 &mlxsw_sp_neigh_ht_params);
1285 if (err)
1286 return err;
1287
1288 /* Initialize the polling interval according to the default
1289 * table.
1290 */
1291 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1292
0b2361d9 1293 /* Create the delayed works for the activity_update */
9011b677 1294 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
c723c735 1295 mlxsw_sp_router_neighs_update_work);
9011b677 1296 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
0b2361d9 1297 mlxsw_sp_router_probe_unresolved_nexthops);
9011b677
IS
1298 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
1299 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
c723c735 1300 return 0;
6cf3c971
JP
1301}
1302
1303static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1304{
9011b677
IS
1305 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
1306 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
1307 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
6cf3c971
JP
1308}
1309
9665b745 1310static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
bf95233e 1311 const struct mlxsw_sp_rif *rif)
9665b745
IS
1312{
1313 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1314
1315 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
bf95233e 1316 rif->rif_index, rif->addr);
9665b745
IS
1317 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1318}
1319
1320static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
bf95233e 1321 struct mlxsw_sp_rif *rif)
9665b745
IS
1322{
1323 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
1324
bf95233e
AS
1325 mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
1326 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
9665b745
IS
1327 rif_list_node)
1328 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1329}
1330
c53b8e1b
IS
1331struct mlxsw_sp_nexthop_key {
1332 struct fib_nh *fib_nh;
1333};
1334
a7ff87ac
JP
1335struct mlxsw_sp_nexthop {
1336 struct list_head neigh_list_node; /* member of neigh entry list */
9665b745 1337 struct list_head rif_list_node;
a7ff87ac
JP
1338 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1339 * this belongs to
1340 */
c53b8e1b
IS
1341 struct rhash_head ht_node;
1342 struct mlxsw_sp_nexthop_key key;
bf95233e 1343 struct mlxsw_sp_rif *rif;
a7ff87ac
JP
1344 u8 should_offload:1, /* set indicates this neigh is connected and
1345 * should be put to KVD linear area of this group.
1346 */
1347 offloaded:1, /* set in case the neigh is actually put into
1348 * KVD linear area of this group.
1349 */
1350 update:1; /* set indicates that MAC of this neigh should be
1351 * updated in HW
1352 */
1353 struct mlxsw_sp_neigh_entry *neigh_entry;
1354};
1355
e9ad5e7d
IS
1356struct mlxsw_sp_nexthop_group_key {
1357 struct fib_info *fi;
1358};
1359
a7ff87ac 1360struct mlxsw_sp_nexthop_group {
e9ad5e7d 1361 struct rhash_head ht_node;
a7ff87ac 1362 struct list_head fib_list; /* list of fib entries that use this group */
e9ad5e7d 1363 struct mlxsw_sp_nexthop_group_key key;
b3e8d1eb
IS
1364 u8 adj_index_valid:1,
1365 gateway:1; /* routes using the group use a gateway */
a7ff87ac
JP
1366 u32 adj_index;
1367 u16 ecmp_size;
1368 u16 count;
1369 struct mlxsw_sp_nexthop nexthops[0];
bf95233e 1370#define nh_rif nexthops[0].rif
a7ff87ac
JP
1371};
1372
e9ad5e7d
IS
1373static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
1374 .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
1375 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
1376 .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
1377};
1378
1379static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
1380 struct mlxsw_sp_nexthop_group *nh_grp)
1381{
9011b677 1382 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
e9ad5e7d
IS
1383 &nh_grp->ht_node,
1384 mlxsw_sp_nexthop_group_ht_params);
1385}
1386
1387static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
1388 struct mlxsw_sp_nexthop_group *nh_grp)
1389{
9011b677 1390 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
e9ad5e7d
IS
1391 &nh_grp->ht_node,
1392 mlxsw_sp_nexthop_group_ht_params);
1393}
1394
1395static struct mlxsw_sp_nexthop_group *
1396mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
1397 struct mlxsw_sp_nexthop_group_key key)
1398{
9011b677 1399 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
e9ad5e7d
IS
1400 mlxsw_sp_nexthop_group_ht_params);
1401}
1402
c53b8e1b
IS
1403static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
1404 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
1405 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
1406 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
1407};
1408
1409static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
1410 struct mlxsw_sp_nexthop *nh)
1411{
9011b677 1412 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
c53b8e1b
IS
1413 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
1414}
1415
1416static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
1417 struct mlxsw_sp_nexthop *nh)
1418{
9011b677 1419 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
c53b8e1b
IS
1420 mlxsw_sp_nexthop_ht_params);
1421}
1422
ad178c8e
IS
1423static struct mlxsw_sp_nexthop *
1424mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1425 struct mlxsw_sp_nexthop_key key)
1426{
9011b677 1427 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
ad178c8e
IS
1428 mlxsw_sp_nexthop_ht_params);
1429}
1430
a7ff87ac 1431static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
76610ebb 1432 const struct mlxsw_sp_fib *fib,
a7ff87ac
JP
1433 u32 adj_index, u16 ecmp_size,
1434 u32 new_adj_index,
1435 u16 new_ecmp_size)
1436{
1437 char raleu_pl[MLXSW_REG_RALEU_LEN];
1438
1a9234e6 1439 mlxsw_reg_raleu_pack(raleu_pl,
76610ebb
IS
1440 (enum mlxsw_reg_ralxx_protocol) fib->proto,
1441 fib->vr->id, adj_index, ecmp_size, new_adj_index,
1a9234e6 1442 new_ecmp_size);
a7ff87ac
JP
1443 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1444}
1445
1446static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1447 struct mlxsw_sp_nexthop_group *nh_grp,
1448 u32 old_adj_index, u16 old_ecmp_size)
1449{
1450 struct mlxsw_sp_fib_entry *fib_entry;
76610ebb 1451 struct mlxsw_sp_fib *fib = NULL;
a7ff87ac
JP
1452 int err;
1453
1454 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
76610ebb 1455 if (fib == fib_entry->fib_node->fib)
a7ff87ac 1456 continue;
76610ebb
IS
1457 fib = fib_entry->fib_node->fib;
1458 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
a7ff87ac
JP
1459 old_adj_index,
1460 old_ecmp_size,
1461 nh_grp->adj_index,
1462 nh_grp->ecmp_size);
1463 if (err)
1464 return err;
1465 }
1466 return 0;
1467}
1468
1469static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1470 struct mlxsw_sp_nexthop *nh)
1471{
1472 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1473 char ratr_pl[MLXSW_REG_RATR_LEN];
1474
1475 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1476 true, adj_index, neigh_entry->rif);
1477 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1478 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1479}
1480
1481static int
1482mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
a59b7e02
IS
1483 struct mlxsw_sp_nexthop_group *nh_grp,
1484 bool reallocate)
a7ff87ac
JP
1485{
1486 u32 adj_index = nh_grp->adj_index; /* base */
1487 struct mlxsw_sp_nexthop *nh;
1488 int i;
1489 int err;
1490
1491 for (i = 0; i < nh_grp->count; i++) {
1492 nh = &nh_grp->nexthops[i];
1493
1494 if (!nh->should_offload) {
1495 nh->offloaded = 0;
1496 continue;
1497 }
1498
a59b7e02 1499 if (nh->update || reallocate) {
a7ff87ac
JP
1500 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1501 adj_index, nh);
1502 if (err)
1503 return err;
1504 nh->update = 0;
1505 nh->offloaded = 1;
1506 }
1507 adj_index++;
1508 }
1509 return 0;
1510}
1511
1512static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1513 struct mlxsw_sp_fib_entry *fib_entry);
1514
1819ae3d
IS
1515static bool
1516mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
1517 const struct mlxsw_sp_fib_entry *fib_entry);
1518
a7ff87ac
JP
1519static int
1520mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1521 struct mlxsw_sp_nexthop_group *nh_grp)
1522{
1523 struct mlxsw_sp_fib_entry *fib_entry;
1524 int err;
1525
1526 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1819ae3d
IS
1527 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
1528 fib_entry))
1529 continue;
a7ff87ac
JP
1530 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1531 if (err)
1532 return err;
1533 }
1534 return 0;
1535}
1536
1537static void
1538mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1539 struct mlxsw_sp_nexthop_group *nh_grp)
1540{
1541 struct mlxsw_sp_nexthop *nh;
1542 bool offload_change = false;
1543 u32 adj_index;
1544 u16 ecmp_size = 0;
1545 bool old_adj_index_valid;
1546 u32 old_adj_index;
1547 u16 old_ecmp_size;
a7ff87ac
JP
1548 int i;
1549 int err;
1550
b3e8d1eb
IS
1551 if (!nh_grp->gateway) {
1552 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1553 return;
1554 }
1555
a7ff87ac
JP
1556 for (i = 0; i < nh_grp->count; i++) {
1557 nh = &nh_grp->nexthops[i];
1558
1559 if (nh->should_offload ^ nh->offloaded) {
1560 offload_change = true;
1561 if (nh->should_offload)
1562 nh->update = 1;
1563 }
1564 if (nh->should_offload)
1565 ecmp_size++;
1566 }
1567 if (!offload_change) {
1568 /* Nothing was added or removed, so no need to reallocate. Just
1569 * update MAC on existing adjacency indexes.
1570 */
a59b7e02
IS
1571 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1572 false);
a7ff87ac
JP
1573 if (err) {
1574 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1575 goto set_trap;
1576 }
1577 return;
1578 }
1579 if (!ecmp_size)
1580 /* No neigh of this group is connected so we just set
1581 * the trap and let everthing flow through kernel.
1582 */
1583 goto set_trap;
1584
13124443
AS
1585 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
1586 if (err) {
a7ff87ac
JP
1587 /* We ran out of KVD linear space, just set the
1588 * trap and let everything flow through kernel.
1589 */
1590 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1591 goto set_trap;
1592 }
a7ff87ac
JP
1593 old_adj_index_valid = nh_grp->adj_index_valid;
1594 old_adj_index = nh_grp->adj_index;
1595 old_ecmp_size = nh_grp->ecmp_size;
1596 nh_grp->adj_index_valid = 1;
1597 nh_grp->adj_index = adj_index;
1598 nh_grp->ecmp_size = ecmp_size;
a59b7e02 1599 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
a7ff87ac
JP
1600 if (err) {
1601 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1602 goto set_trap;
1603 }
1604
1605 if (!old_adj_index_valid) {
1606 /* The trap was set for fib entries, so we have to call
1607 * fib entry update to unset it and use adjacency index.
1608 */
1609 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1610 if (err) {
1611 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1612 goto set_trap;
1613 }
1614 return;
1615 }
1616
1617 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1618 old_adj_index, old_ecmp_size);
1619 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1620 if (err) {
1621 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1622 goto set_trap;
1623 }
1624 return;
1625
1626set_trap:
1627 old_adj_index_valid = nh_grp->adj_index_valid;
1628 nh_grp->adj_index_valid = 0;
1629 for (i = 0; i < nh_grp->count; i++) {
1630 nh = &nh_grp->nexthops[i];
1631 nh->offloaded = 0;
1632 }
1633 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1634 if (err)
1635 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1636 if (old_adj_index_valid)
1637 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1638}
1639
1640static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1641 bool removing)
1642{
1643 if (!removing && !nh->should_offload)
1644 nh->should_offload = 1;
1645 else if (removing && nh->offloaded)
1646 nh->should_offload = 0;
1647 nh->update = 1;
1648}
1649
1650static void
1651mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1652 struct mlxsw_sp_neigh_entry *neigh_entry,
1653 bool removing)
1654{
1655 struct mlxsw_sp_nexthop *nh;
1656
a7ff87ac
JP
1657 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1658 neigh_list_node) {
1659 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1660 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1661 }
a7ff87ac
JP
1662}
1663
9665b745 1664static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
bf95233e 1665 struct mlxsw_sp_rif *rif)
9665b745 1666{
bf95233e 1667 if (nh->rif)
9665b745
IS
1668 return;
1669
bf95233e
AS
1670 nh->rif = rif;
1671 list_add(&nh->rif_list_node, &rif->nexthop_list);
9665b745
IS
1672}
1673
1674static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1675{
bf95233e 1676 if (!nh->rif)
9665b745
IS
1677 return;
1678
1679 list_del(&nh->rif_list_node);
bf95233e 1680 nh->rif = NULL;
9665b745
IS
1681}
1682
a8c97014
IS
1683static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
1684 struct mlxsw_sp_nexthop *nh)
a7ff87ac
JP
1685{
1686 struct mlxsw_sp_neigh_entry *neigh_entry;
a8c97014 1687 struct fib_nh *fib_nh = nh->key.fib_nh;
a7ff87ac 1688 struct neighbour *n;
93a87e5e 1689 u8 nud_state, dead;
c53b8e1b
IS
1690 int err;
1691
ad178c8e 1692 if (!nh->nh_grp->gateway || nh->neigh_entry)
b8399a1e
IS
1693 return 0;
1694
33b1341c
JP
1695 /* Take a reference of neigh here ensuring that neigh would
1696 * not be detructed before the nexthop entry is finished.
1697 * The reference is taken either in neigh_lookup() or
fd76d910 1698 * in neigh_create() in case n is not found.
33b1341c 1699 */
a8c97014 1700 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
33b1341c 1701 if (!n) {
a8c97014
IS
1702 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
1703 if (IS_ERR(n))
1704 return PTR_ERR(n);
a7ff87ac 1705 neigh_event_send(n, NULL);
33b1341c
JP
1706 }
1707 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1708 if (!neigh_entry) {
5c8802f1
IS
1709 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1710 if (IS_ERR(neigh_entry)) {
c53b8e1b
IS
1711 err = -EINVAL;
1712 goto err_neigh_entry_create;
5c8802f1 1713 }
a7ff87ac 1714 }
b2157149
YG
1715
1716 /* If that is the first nexthop connected to that neigh, add to
1717 * nexthop_neighs_list
1718 */
1719 if (list_empty(&neigh_entry->nexthop_list))
1720 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
9011b677 1721 &mlxsw_sp->router->nexthop_neighs_list);
b2157149 1722
a7ff87ac
JP
1723 nh->neigh_entry = neigh_entry;
1724 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1725 read_lock_bh(&n->lock);
1726 nud_state = n->nud_state;
93a87e5e 1727 dead = n->dead;
a7ff87ac 1728 read_unlock_bh(&n->lock);
93a87e5e 1729 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
a7ff87ac
JP
1730
1731 return 0;
c53b8e1b
IS
1732
1733err_neigh_entry_create:
1734 neigh_release(n);
c53b8e1b 1735 return err;
a7ff87ac
JP
1736}
1737
a8c97014
IS
1738static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
1739 struct mlxsw_sp_nexthop *nh)
a7ff87ac
JP
1740{
1741 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
a8c97014 1742 struct neighbour *n;
a7ff87ac 1743
b8399a1e 1744 if (!neigh_entry)
a8c97014
IS
1745 return;
1746 n = neigh_entry->key.n;
b8399a1e 1747
58312125 1748 __mlxsw_sp_nexthop_neigh_update(nh, true);
a7ff87ac 1749 list_del(&nh->neigh_list_node);
e58be79e 1750 nh->neigh_entry = NULL;
b2157149
YG
1751
 1752	/* If this is the last nexthop connected to that neigh, remove it from
 1753	 * nexthop_neighs_list
1754 */
e58be79e
IS
1755 if (list_empty(&neigh_entry->nexthop_list))
1756 list_del(&neigh_entry->nexthop_neighs_list_node);
b2157149 1757
5c8802f1
IS
1758 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1759 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1760
1761 neigh_release(n);
a8c97014 1762}
c53b8e1b 1763
a8c97014
IS
1764static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1765 struct mlxsw_sp_nexthop_group *nh_grp,
1766 struct mlxsw_sp_nexthop *nh,
1767 struct fib_nh *fib_nh)
1768{
1769 struct net_device *dev = fib_nh->nh_dev;
df6dd79b 1770 struct in_device *in_dev;
bf95233e 1771 struct mlxsw_sp_rif *rif;
a8c97014
IS
1772 int err;
1773
1774 nh->nh_grp = nh_grp;
1775 nh->key.fib_nh = fib_nh;
1776 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1777 if (err)
1778 return err;
1779
97989ee0
IS
1780 if (!dev)
1781 return 0;
1782
df6dd79b
IS
1783 in_dev = __in_dev_get_rtnl(dev);
1784 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1785 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1786 return 0;
1787
bf95233e
AS
1788 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1789 if (!rif)
a8c97014 1790 return 0;
bf95233e 1791 mlxsw_sp_nexthop_rif_init(nh, rif);
a8c97014
IS
1792
1793 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1794 if (err)
1795 goto err_nexthop_neigh_init;
1796
1797 return 0;
1798
1799err_nexthop_neigh_init:
a4e75b76 1800 mlxsw_sp_nexthop_rif_fini(nh);
a8c97014
IS
1801 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1802 return err;
1803}
1804
1805static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1806 struct mlxsw_sp_nexthop *nh)
1807{
1808 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
9665b745 1809 mlxsw_sp_nexthop_rif_fini(nh);
c53b8e1b 1810 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
a7ff87ac
JP
1811}
1812
ad178c8e
IS
1813static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1814 unsigned long event, struct fib_nh *fib_nh)
1815{
1816 struct mlxsw_sp_nexthop_key key;
1817 struct mlxsw_sp_nexthop *nh;
bf95233e 1818 struct mlxsw_sp_rif *rif;
ad178c8e 1819
9011b677 1820 if (mlxsw_sp->router->aborted)
ad178c8e
IS
1821 return;
1822
1823 key.fib_nh = fib_nh;
1824 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
1825 if (WARN_ON_ONCE(!nh))
1826 return;
1827
bf95233e
AS
1828 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
1829 if (!rif)
ad178c8e
IS
1830 return;
1831
1832 switch (event) {
1833 case FIB_EVENT_NH_ADD:
bf95233e 1834 mlxsw_sp_nexthop_rif_init(nh, rif);
ad178c8e
IS
1835 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1836 break;
1837 case FIB_EVENT_NH_DEL:
1838 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
9665b745 1839 mlxsw_sp_nexthop_rif_fini(nh);
ad178c8e
IS
1840 break;
1841 }
1842
1843 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1844}
1845
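/* Called when a RIF is going away: detach every nexthop that was bound
 * to it and refresh the affected nexthop groups.
 */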
9665b745 1846static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
bf95233e 1847 struct mlxsw_sp_rif *rif)
9665b745
IS
1848{
1849 struct mlxsw_sp_nexthop *nh, *tmp;
1850
bf95233e 1851 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
9665b745
IS
1852 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1853 mlxsw_sp_nexthop_rif_fini(nh);
1854 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1855 }
1856}
1857
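/* Allocate a nexthop group with one nexthop per fib_nh, take a
 * reference on the fib_info, insert the group into the group hash
 * table and program its adjacency entries.
 */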
a7ff87ac
JP
1858static struct mlxsw_sp_nexthop_group *
1859mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1860{
1861 struct mlxsw_sp_nexthop_group *nh_grp;
1862 struct mlxsw_sp_nexthop *nh;
1863 struct fib_nh *fib_nh;
1864 size_t alloc_size;
1865 int i;
1866 int err;
1867
1868 alloc_size = sizeof(*nh_grp) +
1869 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1870 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1871 if (!nh_grp)
1872 return ERR_PTR(-ENOMEM);
1873 INIT_LIST_HEAD(&nh_grp->fib_list);
b3e8d1eb 1874 nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
a7ff87ac 1875 nh_grp->count = fi->fib_nhs;
e9ad5e7d 1876 nh_grp->key.fi = fi;
7387dbbc 1877 fib_info_hold(fi);
a7ff87ac
JP
1878 for (i = 0; i < nh_grp->count; i++) {
1879 nh = &nh_grp->nexthops[i];
1880 fib_nh = &fi->fib_nh[i];
1881 err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1882 if (err)
1883 goto err_nexthop_init;
1884 }
e9ad5e7d
IS
1885 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
1886 if (err)
1887 goto err_nexthop_group_insert;
a7ff87ac
JP
1888 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1889 return nh_grp;
1890
e9ad5e7d 1891err_nexthop_group_insert:
a7ff87ac 1892err_nexthop_init:
df6dd79b
IS
1893 for (i--; i >= 0; i--) {
1894 nh = &nh_grp->nexthops[i];
a7ff87ac 1895 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
df6dd79b 1896 }
7387dbbc 1897 fib_info_put(nh_grp->key.fi);
a7ff87ac
JP
1898 kfree(nh_grp);
1899 return ERR_PTR(err);
1900}
1901
1902static void
1903mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1904 struct mlxsw_sp_nexthop_group *nh_grp)
1905{
1906 struct mlxsw_sp_nexthop *nh;
1907 int i;
1908
e9ad5e7d 1909 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
a7ff87ac
JP
1910 for (i = 0; i < nh_grp->count; i++) {
1911 nh = &nh_grp->nexthops[i];
1912 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1913 }
58312125
IS
1914 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1915 WARN_ON_ONCE(nh_grp->adj_index_valid);
7387dbbc 1916 fib_info_put(nh_grp->key.fi);
a7ff87ac
JP
1917 kfree(nh_grp);
1918}
1919
a7ff87ac
JP
1920static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1921 struct mlxsw_sp_fib_entry *fib_entry,
1922 struct fib_info *fi)
1923{
e9ad5e7d 1924 struct mlxsw_sp_nexthop_group_key key;
a7ff87ac
JP
1925 struct mlxsw_sp_nexthop_group *nh_grp;
1926
e9ad5e7d
IS
1927 key.fi = fi;
1928 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
a7ff87ac
JP
1929 if (!nh_grp) {
1930 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1931 if (IS_ERR(nh_grp))
1932 return PTR_ERR(nh_grp);
1933 }
1934 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1935 fib_entry->nh_group = nh_grp;
1936 return 0;
1937}
1938
1939static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1940 struct mlxsw_sp_fib_entry *fib_entry)
1941{
1942 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1943
1944 list_del(&fib_entry->nexthop_group_node);
1945 if (!list_empty(&nh_grp->fib_list))
1946 return;
1947 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1948}
1949
013b20f9
IS
1950static bool
1951mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1952{
1953 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1954
9aecce1c
IS
1955 if (fib_entry->params.tos)
1956 return false;
1957
013b20f9
IS
1958 switch (fib_entry->type) {
1959 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1960 return !!nh_group->adj_index_valid;
1961 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
70ad3506 1962 return !!nh_group->nh_rif;
013b20f9
IS
1963 default:
1964 return false;
1965 }
1966}
1967
1968static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
1969{
1970 fib_entry->offloaded = true;
1971
76610ebb 1972 switch (fib_entry->fib_node->fib->proto) {
013b20f9
IS
1973 case MLXSW_SP_L3_PROTO_IPV4:
1974 fib_info_offload_inc(fib_entry->nh_group->key.fi);
1975 break;
1976 case MLXSW_SP_L3_PROTO_IPV6:
1977 WARN_ON_ONCE(1);
1978 }
1979}
1980
1981static void
1982mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
1983{
76610ebb 1984 switch (fib_entry->fib_node->fib->proto) {
013b20f9
IS
1985 case MLXSW_SP_L3_PROTO_IPV4:
1986 fib_info_offload_dec(fib_entry->nh_group->key.fi);
1987 break;
1988 case MLXSW_SP_L3_PROTO_IPV6:
1989 WARN_ON_ONCE(1);
1990 }
1991
1992 fib_entry->offloaded = false;
1993}
1994
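/* Reflect the result of a RALUE write/delete in the kernel FIB offload
 * indication: set it when the entry became offloaded and clear it when
 * the entry was deleted or is no longer offloaded.
 */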
1995static void
1996mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
1997 enum mlxsw_reg_ralue_op op, int err)
1998{
1999 switch (op) {
2000 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
2001 if (!fib_entry->offloaded)
2002 return;
2003 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
2004 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
2005 if (err)
2006 return;
2007 if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
2008 !fib_entry->offloaded)
2009 mlxsw_sp_fib_entry_offload_set(fib_entry);
2010 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
2011 fib_entry->offloaded)
2012 mlxsw_sp_fib_entry_offload_unset(fib_entry);
2013 return;
2014 default:
2015 return;
2016 }
2017}
2018
a7ff87ac
JP
2019static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
2020 struct mlxsw_sp_fib_entry *fib_entry,
2021 enum mlxsw_reg_ralue_op op)
2022{
2023 char ralue_pl[MLXSW_REG_RALUE_LEN];
76610ebb 2024 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
9aecce1c 2025 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
a7ff87ac
JP
2026 enum mlxsw_reg_ralue_trap_action trap_action;
2027 u16 trap_id = 0;
2028 u32 adjacency_index = 0;
2029 u16 ecmp_size = 0;
2030
 2031	/* If the nexthop group adjacency index is valid, use it with
 2032	 * the provided ECMP size. Otherwise, set up a trap and pass
 2033	 * traffic to the kernel.
2034 */
4b411477 2035 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
a7ff87ac
JP
2036 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
2037 adjacency_index = fib_entry->nh_group->adj_index;
2038 ecmp_size = fib_entry->nh_group->ecmp_size;
2039 } else {
2040 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2041 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2042 }
2043
1a9234e6 2044 mlxsw_reg_ralue_pack4(ralue_pl,
76610ebb
IS
2045 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2046 fib->vr->id, fib_entry->fib_node->key.prefix_len,
9aecce1c 2047 *p_dip);
a7ff87ac
JP
2048 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
2049 adjacency_index, ecmp_size);
2050 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2051}
2052
61c503f9
JP
2053static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
2054 struct mlxsw_sp_fib_entry *fib_entry,
2055 enum mlxsw_reg_ralue_op op)
2056{
bf95233e 2057 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
76610ebb 2058 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
70ad3506 2059 enum mlxsw_reg_ralue_trap_action trap_action;
61c503f9 2060 char ralue_pl[MLXSW_REG_RALUE_LEN];
9aecce1c 2061 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
70ad3506 2062 u16 trap_id = 0;
bf95233e 2063 u16 rif_index = 0;
70ad3506
IS
2064
2065 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
2066 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
bf95233e 2067 rif_index = rif->rif_index;
70ad3506
IS
2068 } else {
2069 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2070 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2071 }
61c503f9 2072
1a9234e6 2073 mlxsw_reg_ralue_pack4(ralue_pl,
76610ebb
IS
2074 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2075 fib->vr->id, fib_entry->fib_node->key.prefix_len,
9aecce1c 2076 *p_dip);
bf95233e
AS
2077 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
2078 rif_index);
61c503f9
JP
2079 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2080}
2081
2082static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
2083 struct mlxsw_sp_fib_entry *fib_entry,
2084 enum mlxsw_reg_ralue_op op)
2085{
76610ebb 2086 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
61c503f9 2087 char ralue_pl[MLXSW_REG_RALUE_LEN];
9aecce1c 2088 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
61c503f9 2089
1a9234e6 2090 mlxsw_reg_ralue_pack4(ralue_pl,
76610ebb
IS
2091 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2092 fib->vr->id, fib_entry->fib_node->key.prefix_len,
9aecce1c 2093 *p_dip);
61c503f9
JP
2094 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2095 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2096}
2097
2098static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
2099 struct mlxsw_sp_fib_entry *fib_entry,
2100 enum mlxsw_reg_ralue_op op)
2101{
2102 switch (fib_entry->type) {
2103 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
a7ff87ac 2104 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
61c503f9
JP
2105 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
2106 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
2107 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
2108 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
2109 }
2110 return -EINVAL;
2111}
2112
2113static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2114 struct mlxsw_sp_fib_entry *fib_entry,
2115 enum mlxsw_reg_ralue_op op)
2116{
013b20f9
IS
2117 int err = -EINVAL;
2118
76610ebb 2119 switch (fib_entry->fib_node->fib->proto) {
61c503f9 2120 case MLXSW_SP_L3_PROTO_IPV4:
013b20f9
IS
2121 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
2122 break;
61c503f9 2123 case MLXSW_SP_L3_PROTO_IPV6:
013b20f9 2124 return err;
61c503f9 2125 }
013b20f9
IS
2126 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
2127 return err;
61c503f9
JP
2128}
2129
2130static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
2131 struct mlxsw_sp_fib_entry *fib_entry)
2132{
7146da31
JP
2133 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2134 MLXSW_REG_RALUE_OP_WRITE_WRITE);
61c503f9
JP
2135}
2136
2137static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
2138 struct mlxsw_sp_fib_entry *fib_entry)
2139{
2140 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2141 MLXSW_REG_RALUE_OP_WRITE_DELETE);
2142}
2143
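/* Map the kernel route type to a hardware forwarding action:
 * broadcast/local routes trap to the CPU, unreachable/blackhole/
 * prohibit routes use a lower-priority local action, and unicast
 * routes are remote (adjacency) when a gateway is present.
 */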
61c503f9 2144static int
013b20f9
IS
2145mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
2146 const struct fib_entry_notifier_info *fen_info,
2147 struct mlxsw_sp_fib_entry *fib_entry)
61c503f9 2148{
b45f64d1 2149 struct fib_info *fi = fen_info->fi;
61c503f9 2150
97989ee0
IS
2151 switch (fen_info->type) {
2152 case RTN_BROADCAST: /* fall through */
2153 case RTN_LOCAL:
61c503f9
JP
2154 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2155 return 0;
97989ee0
IS
2156 case RTN_UNREACHABLE: /* fall through */
2157 case RTN_BLACKHOLE: /* fall through */
2158 case RTN_PROHIBIT:
2159 /* Packets hitting these routes need to be trapped, but
2160 * can do so with a lower priority than packets directed
2161 * at the host, so use action type local instead of trap.
2162 */
61c503f9 2163 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
97989ee0
IS
2164 return 0;
2165 case RTN_UNICAST:
2166 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
2167 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2168 else
2169 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
2170 return 0;
2171 default:
2172 return -EINVAL;
2173 }
a7ff87ac
JP
2174}
2175
5b004412 2176static struct mlxsw_sp_fib_entry *
9aecce1c
IS
2177mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
2178 struct mlxsw_sp_fib_node *fib_node,
2179 const struct fib_entry_notifier_info *fen_info)
61c503f9 2180{
61c503f9 2181 struct mlxsw_sp_fib_entry *fib_entry;
61c503f9
JP
2182 int err;
2183
9aecce1c 2184 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
61c503f9
JP
2185 if (!fib_entry) {
2186 err = -ENOMEM;
9aecce1c 2187 goto err_fib_entry_alloc;
61c503f9 2188 }
61c503f9 2189
013b20f9 2190 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
61c503f9 2191 if (err)
013b20f9 2192 goto err_fib4_entry_type_set;
61c503f9 2193
9aecce1c 2194 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
b8399a1e
IS
2195 if (err)
2196 goto err_nexthop_group_get;
2197
9aecce1c
IS
2198 fib_entry->params.prio = fen_info->fi->fib_priority;
2199 fib_entry->params.tb_id = fen_info->tb_id;
2200 fib_entry->params.type = fen_info->type;
2201 fib_entry->params.tos = fen_info->tos;
2202
2203 fib_entry->fib_node = fib_node;
2204
5b004412
JP
2205 return fib_entry;
2206
b8399a1e 2207err_nexthop_group_get:
013b20f9 2208err_fib4_entry_type_set:
9aecce1c
IS
2209 kfree(fib_entry);
2210err_fib_entry_alloc:
5b004412
JP
2211 return ERR_PTR(err);
2212}
2213
9aecce1c
IS
2214static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2215 struct mlxsw_sp_fib_entry *fib_entry)
2216{
2217 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
2218 kfree(fib_entry);
2219}
2220
2221static struct mlxsw_sp_fib_node *
2222mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2223 const struct fib_entry_notifier_info *fen_info);
2224
5b004412 2225static struct mlxsw_sp_fib_entry *
9aecce1c
IS
2226mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2227 const struct fib_entry_notifier_info *fen_info)
5b004412 2228{
9aecce1c
IS
2229 struct mlxsw_sp_fib_entry *fib_entry;
2230 struct mlxsw_sp_fib_node *fib_node;
5b004412 2231
9aecce1c
IS
2232 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2233 if (IS_ERR(fib_node))
2234 return NULL;
2235
2236 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2237 if (fib_entry->params.tb_id == fen_info->tb_id &&
2238 fib_entry->params.tos == fen_info->tos &&
2239 fib_entry->params.type == fen_info->type &&
2240 fib_entry->nh_group->key.fi == fen_info->fi) {
2241 return fib_entry;
2242 }
2243 }
2244
2245 return NULL;
2246}
2247
2248static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
2249 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
2250 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
2251 .key_len = sizeof(struct mlxsw_sp_fib_key),
2252 .automatic_shrinking = true,
2253};
2254
2255static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
2256 struct mlxsw_sp_fib_node *fib_node)
2257{
2258 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
2259 mlxsw_sp_fib_ht_params);
2260}
2261
2262static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
2263 struct mlxsw_sp_fib_node *fib_node)
2264{
2265 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
2266 mlxsw_sp_fib_ht_params);
2267}
2268
2269static struct mlxsw_sp_fib_node *
2270mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2271 size_t addr_len, unsigned char prefix_len)
2272{
2273 struct mlxsw_sp_fib_key key;
2274
2275 memset(&key, 0, sizeof(key));
2276 memcpy(key.addr, addr, addr_len);
2277 key.prefix_len = prefix_len;
2278 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2279}
2280
2281static struct mlxsw_sp_fib_node *
76610ebb 2282mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
9aecce1c
IS
2283 size_t addr_len, unsigned char prefix_len)
2284{
2285 struct mlxsw_sp_fib_node *fib_node;
2286
2287 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2288 if (!fib_node)
5b004412
JP
2289 return NULL;
2290
9aecce1c 2291 INIT_LIST_HEAD(&fib_node->entry_list);
76610ebb 2292 list_add(&fib_node->list, &fib->node_list);
9aecce1c
IS
2293 memcpy(fib_node->key.addr, addr, addr_len);
2294 fib_node->key.prefix_len = prefix_len;
9aecce1c
IS
2295
2296 return fib_node;
2297}
2298
2299static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2300{
9aecce1c
IS
2301 list_del(&fib_node->list);
2302 WARN_ON(!list_empty(&fib_node->entry_list));
2303 kfree(fib_node);
2304}
2305
2306static bool
2307mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2308 const struct mlxsw_sp_fib_entry *fib_entry)
2309{
2310 return list_first_entry(&fib_node->entry_list,
2311 struct mlxsw_sp_fib_entry, list) == fib_entry;
2312}
2313
2314static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2315{
2316 unsigned char prefix_len = fib_node->key.prefix_len;
76610ebb 2317 struct mlxsw_sp_fib *fib = fib_node->fib;
9aecce1c
IS
2318
2319 if (fib->prefix_ref_count[prefix_len]++ == 0)
2320 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2321}
2322
2323static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2324{
2325 unsigned char prefix_len = fib_node->key.prefix_len;
76610ebb 2326 struct mlxsw_sp_fib *fib = fib_node->fib;
9aecce1c
IS
2327
2328 if (--fib->prefix_ref_count[prefix_len] == 0)
2329 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
5b004412
JP
2330}
2331
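/* Insert the node into the FIB hash table and make sure the virtual
 * router's LPM tree covers the new prefix length, binding a fresh tree
 * if the FIB had no prefixes in use yet.
 */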
76610ebb
IS
2332static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2333 struct mlxsw_sp_fib_node *fib_node,
2334 struct mlxsw_sp_fib *fib)
2335{
2336 struct mlxsw_sp_prefix_usage req_prefix_usage;
2337 struct mlxsw_sp_lpm_tree *lpm_tree;
2338 int err;
2339
2340 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2341 if (err)
2342 return err;
2343 fib_node->fib = fib;
2344
2345 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2346 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2347
2348 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2349 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2350 &req_prefix_usage);
2351 if (err)
2352 goto err_tree_check;
2353 } else {
2354 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2355 fib->proto);
2356 if (IS_ERR(lpm_tree))
2357 return PTR_ERR(lpm_tree);
2358 fib->lpm_tree = lpm_tree;
2359 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2360 if (err)
2361 goto err_tree_bind;
2362 }
2363
2364 mlxsw_sp_fib_node_prefix_inc(fib_node);
2365
2366 return 0;
2367
2368err_tree_bind:
2369 fib->lpm_tree = NULL;
2370 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2371err_tree_check:
2372 fib_node->fib = NULL;
2373 mlxsw_sp_fib_node_remove(fib, fib_node);
2374 return err;
2375}
2376
2377static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
2378 struct mlxsw_sp_fib_node *fib_node)
2379{
2380 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
2381 struct mlxsw_sp_fib *fib = fib_node->fib;
2382
2383 mlxsw_sp_fib_node_prefix_dec(fib_node);
2384
2385 if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2386 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
2387 fib->lpm_tree = NULL;
2388 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2389 } else {
2390 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
2391 }
2392
2393 fib_node->fib = NULL;
2394 mlxsw_sp_fib_node_remove(fib, fib_node);
2395}
2396
9aecce1c
IS
2397static struct mlxsw_sp_fib_node *
2398mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2399 const struct fib_entry_notifier_info *fen_info)
5b004412 2400{
9aecce1c 2401 struct mlxsw_sp_fib_node *fib_node;
76610ebb 2402 struct mlxsw_sp_fib *fib;
9aecce1c
IS
2403 struct mlxsw_sp_vr *vr;
2404 int err;
2405
76610ebb 2406 vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
9aecce1c
IS
2407 if (IS_ERR(vr))
2408 return ERR_CAST(vr);
76610ebb 2409 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
9aecce1c 2410
76610ebb 2411 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
9aecce1c
IS
2412 sizeof(fen_info->dst),
2413 fen_info->dst_len);
2414 if (fib_node)
2415 return fib_node;
5b004412 2416
76610ebb 2417 fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
9aecce1c
IS
2418 sizeof(fen_info->dst),
2419 fen_info->dst_len);
2420 if (!fib_node) {
2421 err = -ENOMEM;
2422 goto err_fib_node_create;
5b004412 2423 }
9aecce1c 2424
76610ebb
IS
2425 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
2426 if (err)
2427 goto err_fib_node_init;
2428
9aecce1c
IS
2429 return fib_node;
2430
76610ebb
IS
2431err_fib_node_init:
2432 mlxsw_sp_fib_node_destroy(fib_node);
9aecce1c 2433err_fib_node_create:
76610ebb 2434 mlxsw_sp_vr_put(vr);
9aecce1c 2435 return ERR_PTR(err);
5b004412
JP
2436}
2437
9aecce1c
IS
2438static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
2439 struct mlxsw_sp_fib_node *fib_node)
5b004412 2440{
76610ebb 2441 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
5b004412 2442
9aecce1c
IS
2443 if (!list_empty(&fib_node->entry_list))
2444 return;
76610ebb 2445 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
9aecce1c 2446 mlxsw_sp_fib_node_destroy(fib_node);
76610ebb 2447 mlxsw_sp_vr_put(vr);
61c503f9
JP
2448}
2449
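/* Entries in a node's list are kept sorted by table ID, TOS and
 * priority. Return the first existing entry that the new entry should
 * be inserted before, or NULL if there is none.
 */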
9aecce1c
IS
2450static struct mlxsw_sp_fib_entry *
2451mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
2452 const struct mlxsw_sp_fib_entry_params *params)
61c503f9 2453{
61c503f9 2454 struct mlxsw_sp_fib_entry *fib_entry;
9aecce1c
IS
2455
2456 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2457 if (fib_entry->params.tb_id > params->tb_id)
2458 continue;
2459 if (fib_entry->params.tb_id != params->tb_id)
2460 break;
2461 if (fib_entry->params.tos > params->tos)
2462 continue;
2463 if (fib_entry->params.prio >= params->prio ||
2464 fib_entry->params.tos < params->tos)
2465 return fib_entry;
2466 }
2467
2468 return NULL;
2469}
2470
4283bce5
IS
2471static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
2472 struct mlxsw_sp_fib_entry *new_entry)
2473{
2474 struct mlxsw_sp_fib_node *fib_node;
2475
2476 if (WARN_ON(!fib_entry))
2477 return -EINVAL;
2478
2479 fib_node = fib_entry->fib_node;
2480 list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
2481 if (fib_entry->params.tb_id != new_entry->params.tb_id ||
2482 fib_entry->params.tos != new_entry->params.tos ||
2483 fib_entry->params.prio != new_entry->params.prio)
2484 break;
2485 }
2486
2487 list_add_tail(&new_entry->list, &fib_entry->list);
2488 return 0;
2489}
2490
9aecce1c
IS
2491static int
2492mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
4283bce5 2493 struct mlxsw_sp_fib_entry *new_entry,
599cf8f9 2494 bool replace, bool append)
9aecce1c
IS
2495{
2496 struct mlxsw_sp_fib_entry *fib_entry;
2497
2498 fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);
2499
4283bce5
IS
2500 if (append)
2501 return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
599cf8f9
IS
2502 if (replace && WARN_ON(!fib_entry))
2503 return -EINVAL;
4283bce5 2504
599cf8f9
IS
 2505	/* Insert the new entry before the replaced one, so that we can
 2506	 * later remove the replaced entry.
2507 */
9aecce1c
IS
2508 if (fib_entry) {
2509 list_add_tail(&new_entry->list, &fib_entry->list);
2510 } else {
2511 struct mlxsw_sp_fib_entry *last;
2512
2513 list_for_each_entry(last, &fib_node->entry_list, list) {
2514 if (new_entry->params.tb_id > last->params.tb_id)
2515 break;
2516 fib_entry = last;
2517 }
2518
2519 if (fib_entry)
2520 list_add(&new_entry->list, &fib_entry->list);
2521 else
2522 list_add(&new_entry->list, &fib_node->entry_list);
2523 }
2524
2525 return 0;
2526}
2527
2528static void
2529mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
2530{
2531 list_del(&fib_entry->list);
2532}
2533
2534static int
2535mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
2536 const struct mlxsw_sp_fib_node *fib_node,
2537 struct mlxsw_sp_fib_entry *fib_entry)
2538{
2539 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2540 return 0;
2541
2542 /* To prevent packet loss, overwrite the previously offloaded
2543 * entry.
2544 */
2545 if (!list_is_singular(&fib_node->entry_list)) {
2546 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2547 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2548
2549 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
2550 }
2551
2552 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2553}
2554
2555static void
2556mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
2557 const struct mlxsw_sp_fib_node *fib_node,
2558 struct mlxsw_sp_fib_entry *fib_entry)
2559{
2560 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2561 return;
2562
2563 /* Promote the next entry by overwriting the deleted entry */
2564 if (!list_is_singular(&fib_node->entry_list)) {
2565 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2566 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2567
2568 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
2569 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2570 return;
2571 }
2572
2573 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2574}
2575
2576static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4283bce5 2577 struct mlxsw_sp_fib_entry *fib_entry,
599cf8f9 2578 bool replace, bool append)
9aecce1c
IS
2579{
2580 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2581 int err;
2582
599cf8f9
IS
2583 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2584 append);
9aecce1c
IS
2585 if (err)
2586 return err;
2587
2588 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2589 if (err)
2590 goto err_fib4_node_entry_add;
2591
9aecce1c
IS
2592 return 0;
2593
2594err_fib4_node_entry_add:
2595 mlxsw_sp_fib4_node_list_remove(fib_entry);
2596 return err;
2597}
2598
2599static void
2600mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
2601 struct mlxsw_sp_fib_entry *fib_entry)
2602{
2603 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2604
9aecce1c
IS
2605 mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
2606 mlxsw_sp_fib4_node_list_remove(fib_entry);
2607}
2608
599cf8f9
IS
2609static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2610 struct mlxsw_sp_fib_entry *fib_entry,
2611 bool replace)
2612{
2613 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2614 struct mlxsw_sp_fib_entry *replaced;
2615
2616 if (!replace)
2617 return;
2618
 2619	/* We inserted the new entry before the replaced one */
2620 replaced = list_next_entry(fib_entry, list);
2621
2622 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2623 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
2624 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2625}
2626
9aecce1c
IS
2627static int
2628mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
4283bce5 2629 const struct fib_entry_notifier_info *fen_info,
599cf8f9 2630 bool replace, bool append)
9aecce1c
IS
2631{
2632 struct mlxsw_sp_fib_entry *fib_entry;
2633 struct mlxsw_sp_fib_node *fib_node;
61c503f9
JP
2634 int err;
2635
9011b677 2636 if (mlxsw_sp->router->aborted)
b45f64d1
JP
2637 return 0;
2638
9aecce1c
IS
2639 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2640 if (IS_ERR(fib_node)) {
2641 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
2642 return PTR_ERR(fib_node);
b45f64d1 2643 }
61c503f9 2644
9aecce1c
IS
2645 fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
2646 if (IS_ERR(fib_entry)) {
2647 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
2648 err = PTR_ERR(fib_entry);
2649 goto err_fib4_entry_create;
2650 }
5b004412 2651
599cf8f9
IS
2652 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
2653 append);
b45f64d1 2654 if (err) {
9aecce1c
IS
2655 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
2656 goto err_fib4_node_entry_link;
b45f64d1 2657 }
9aecce1c 2658
599cf8f9
IS
2659 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);
2660
61c503f9
JP
2661 return 0;
2662
9aecce1c
IS
2663err_fib4_node_entry_link:
2664 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2665err_fib4_entry_create:
2666 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
61c503f9
JP
2667 return err;
2668}
2669
37956d78
JP
2670static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2671 struct fib_entry_notifier_info *fen_info)
61c503f9 2672{
61c503f9 2673 struct mlxsw_sp_fib_entry *fib_entry;
9aecce1c 2674 struct mlxsw_sp_fib_node *fib_node;
61c503f9 2675
9011b677 2676 if (mlxsw_sp->router->aborted)
37956d78 2677 return;
b45f64d1 2678
9aecce1c
IS
2679 fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2680 if (WARN_ON(!fib_entry))
37956d78 2681 return;
9aecce1c 2682 fib_node = fib_entry->fib_node;
5b004412 2683
9aecce1c
IS
2684 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2685 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2686 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
61c503f9 2687}
b45f64d1
JP
2688
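/* After an abort, bind a minimal LPM tree to every virtual router in
 * use and install a default route in each one whose action is to trap
 * packets to the CPU, so the kernel takes over all forwarding.
 */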
2689static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
2690{
2691 char ralta_pl[MLXSW_REG_RALTA_LEN];
2692 char ralst_pl[MLXSW_REG_RALST_LEN];
b5d90e6d 2693 int i, err;
b45f64d1
JP
2694
2695 mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
2696 MLXSW_SP_LPM_TREE_MIN);
2697 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
2698 if (err)
2699 return err;
2700
2701 mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
2702 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
2703 if (err)
2704 return err;
2705
b5d90e6d 2706 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 2707 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
b5d90e6d
IS
2708 char raltb_pl[MLXSW_REG_RALTB_LEN];
2709 char ralue_pl[MLXSW_REG_RALUE_LEN];
b45f64d1 2710
b5d90e6d
IS
2711 if (!mlxsw_sp_vr_is_used(vr))
2712 continue;
2713
2714 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
2715 MLXSW_REG_RALXX_PROTOCOL_IPV4,
2716 MLXSW_SP_LPM_TREE_MIN);
2717 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
2718 raltb_pl);
2719 if (err)
2720 return err;
2721
2722 mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
2723 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
2724 0);
2725 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2726 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
2727 ralue_pl);
2728 if (err)
2729 return err;
2730 }
2731
2732 return 0;
b45f64d1
JP
2733}
2734
9aecce1c
IS
2735static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
2736 struct mlxsw_sp_fib_node *fib_node)
2737{
2738 struct mlxsw_sp_fib_entry *fib_entry, *tmp;
2739
2740 list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
2741 bool do_break = &tmp->list == &fib_node->entry_list;
2742
2743 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2744 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2745 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2746 /* Break when entry list is empty and node was freed.
2747 * Otherwise, we'll access freed memory in the next
2748 * iteration.
2749 */
2750 if (do_break)
2751 break;
2752 }
2753}
2754
2755static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2756 struct mlxsw_sp_fib_node *fib_node)
2757{
76610ebb 2758 switch (fib_node->fib->proto) {
9aecce1c
IS
2759 case MLXSW_SP_L3_PROTO_IPV4:
2760 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2761 break;
2762 case MLXSW_SP_L3_PROTO_IPV6:
2763 WARN_ON_ONCE(1);
2764 break;
2765 }
2766}
2767
76610ebb
IS
2768static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2769 struct mlxsw_sp_vr *vr,
2770 enum mlxsw_sp_l3proto proto)
b45f64d1 2771{
76610ebb 2772 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
9aecce1c 2773 struct mlxsw_sp_fib_node *fib_node, *tmp;
76610ebb
IS
2774
2775 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2776 bool do_break = &tmp->list == &fib->node_list;
2777
2778 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2779 if (do_break)
2780 break;
2781 }
2782}
2783
2784static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
2785{
b45f64d1 2786 int i;
b45f64d1 2787
c1a38311 2788 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
9011b677 2789 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
ac571de9 2790
76610ebb 2791 if (!mlxsw_sp_vr_is_used(vr))
b45f64d1 2792 continue;
76610ebb 2793 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
b45f64d1 2794 }
ac571de9
IS
2795}
2796
2797static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
2798{
2799 int err;
2800
9011b677 2801 if (mlxsw_sp->router->aborted)
d331d303
IS
2802 return;
2803 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
ac571de9 2804 mlxsw_sp_router_fib_flush(mlxsw_sp);
9011b677 2805 mlxsw_sp->router->aborted = true;
b45f64d1
JP
2806 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
2807 if (err)
2808 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
2809}
2810
3057224e 2811struct mlxsw_sp_fib_event_work {
a0e4761d 2812 struct work_struct work;
ad178c8e
IS
2813 union {
2814 struct fib_entry_notifier_info fen_info;
5d7bfd14 2815 struct fib_rule_notifier_info fr_info;
ad178c8e
IS
2816 struct fib_nh_notifier_info fnh_info;
2817 };
3057224e
IS
2818 struct mlxsw_sp *mlxsw_sp;
2819 unsigned long event;
2820};
2821
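/* Deferred handler for FIB notifications: runs under RTNL, dispatches
 * on the recorded event and releases the references taken in the
 * atomic notifier callback below.
 */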
2822static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
b45f64d1 2823{
3057224e 2824 struct mlxsw_sp_fib_event_work *fib_work =
a0e4761d 2825 container_of(work, struct mlxsw_sp_fib_event_work, work);
3057224e 2826 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5d7bfd14 2827 struct fib_rule *rule;
599cf8f9 2828 bool replace, append;
b45f64d1
JP
2829 int err;
2830
3057224e
IS
2831 /* Protect internal structures from changes */
2832 rtnl_lock();
2833 switch (fib_work->event) {
599cf8f9 2834 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4283bce5 2835 case FIB_EVENT_ENTRY_APPEND: /* fall through */
b45f64d1 2836 case FIB_EVENT_ENTRY_ADD:
599cf8f9 2837 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
4283bce5
IS
2838 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
2839 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
599cf8f9 2840 replace, append);
b45f64d1
JP
2841 if (err)
2842 mlxsw_sp_router_fib4_abort(mlxsw_sp);
3057224e 2843 fib_info_put(fib_work->fen_info.fi);
b45f64d1
JP
2844 break;
2845 case FIB_EVENT_ENTRY_DEL:
3057224e
IS
2846 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
2847 fib_info_put(fib_work->fen_info.fi);
b45f64d1
JP
2848 break;
2849 case FIB_EVENT_RULE_ADD: /* fall through */
2850 case FIB_EVENT_RULE_DEL:
5d7bfd14 2851 rule = fib_work->fr_info.rule;
c7f6e665 2852 if (!fib4_rule_default(rule) && !rule->l3mdev)
5d7bfd14
IS
2853 mlxsw_sp_router_fib4_abort(mlxsw_sp);
2854 fib_rule_put(rule);
b45f64d1 2855 break;
ad178c8e
IS
2856 case FIB_EVENT_NH_ADD: /* fall through */
2857 case FIB_EVENT_NH_DEL:
2858 mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
2859 fib_work->fnh_info.fib_nh);
2860 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
2861 break;
b45f64d1 2862 }
3057224e
IS
2863 rtnl_unlock();
2864 kfree(fib_work);
2865}
2866
2867/* Called with rcu_read_lock() */
2868static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
2869 unsigned long event, void *ptr)
2870{
3057224e
IS
2871 struct mlxsw_sp_fib_event_work *fib_work;
2872 struct fib_notifier_info *info = ptr;
7e39d115 2873 struct mlxsw_sp_router *router;
3057224e
IS
2874
2875 if (!net_eq(info->net, &init_net))
2876 return NOTIFY_DONE;
2877
2878 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
2879 if (WARN_ON(!fib_work))
2880 return NOTIFY_BAD;
2881
a0e4761d 2882 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
7e39d115
IS
2883 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
2884 fib_work->mlxsw_sp = router->mlxsw_sp;
3057224e
IS
2885 fib_work->event = event;
2886
2887 switch (event) {
599cf8f9 2888 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
4283bce5 2889 case FIB_EVENT_ENTRY_APPEND: /* fall through */
3057224e
IS
2890 case FIB_EVENT_ENTRY_ADD: /* fall through */
2891 case FIB_EVENT_ENTRY_DEL:
2892 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
 2893	/* Take a reference on the fib_info to prevent it from being
2894 * freed while work is queued. Release it afterwards.
2895 */
2896 fib_info_hold(fib_work->fen_info.fi);
2897 break;
5d7bfd14
IS
2898 case FIB_EVENT_RULE_ADD: /* fall through */
2899 case FIB_EVENT_RULE_DEL:
2900 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
2901 fib_rule_get(fib_work->fr_info.rule);
2902 break;
ad178c8e
IS
2903 case FIB_EVENT_NH_ADD: /* fall through */
2904 case FIB_EVENT_NH_DEL:
2905 memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
2906 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
2907 break;
3057224e
IS
2908 }
2909
a0e4761d 2910 mlxsw_core_schedule_work(&fib_work->work);
3057224e 2911
b45f64d1
JP
2912 return NOTIFY_DONE;
2913}
2914
4724ba56
IS
2915static struct mlxsw_sp_rif *
2916mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2917 const struct net_device *dev)
2918{
2919 int i;
2920
2921 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
5f9efffb
IS
2922 if (mlxsw_sp->router->rifs[i] &&
2923 mlxsw_sp->router->rifs[i]->dev == dev)
2924 return mlxsw_sp->router->rifs[i];
4724ba56
IS
2925
2926 return NULL;
2927}
2928
2929static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
2930{
2931 char ritr_pl[MLXSW_REG_RITR_LEN];
2932 int err;
2933
2934 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
2935 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2936 if (WARN_ON_ONCE(err))
2937 return err;
2938
2939 mlxsw_reg_ritr_enable_set(ritr_pl, false);
2940 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2941}
2942
2943static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
bf95233e 2944 struct mlxsw_sp_rif *rif)
4724ba56 2945{
bf95233e
AS
2946 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
2947 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
2948 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
4724ba56
IS
2949}
2950
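/* Decide whether an inetaddr event requires a RIF change: configure one
 * on NETDEV_UP only if none exists yet, and remove it on NETDEV_DOWN
 * only when the netdev has no remaining IP addresses and is not an L3
 * slave.
 */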
bf95233e 2951static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
4724ba56
IS
2952 const struct in_device *in_dev,
2953 unsigned long event)
2954{
2955 switch (event) {
2956 case NETDEV_UP:
bf95233e 2957 if (!rif)
4724ba56
IS
2958 return true;
2959 return false;
2960 case NETDEV_DOWN:
bf95233e
AS
2961 if (rif && !in_dev->ifa_list &&
2962 !netif_is_l3_slave(rif->dev))
4724ba56
IS
2963 return true;
2964 /* It is possible we already removed the RIF ourselves
2965 * if it was assigned to a netdev that is now a bridge
2966 * or LAG slave.
2967 */
2968 return false;
2969 }
2970
2971 return false;
2972}
2973
e4f3c1c1
IS
2974static enum mlxsw_sp_rif_type
2975mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
2976 const struct net_device *dev)
2977{
2978 enum mlxsw_sp_fid_type type;
2979
2980 /* RIF type is derived from the type of the underlying FID */
2981 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
2982 type = MLXSW_SP_FID_TYPE_8021Q;
2983 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
2984 type = MLXSW_SP_FID_TYPE_8021Q;
2985 else if (netif_is_bridge_master(dev))
2986 type = MLXSW_SP_FID_TYPE_8021D;
2987 else
2988 type = MLXSW_SP_FID_TYPE_RFID;
2989
2990 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
2991}
2992
de5ed99e 2993static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
4724ba56
IS
2994{
2995 int i;
2996
de5ed99e
IS
2997 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
2998 if (!mlxsw_sp->router->rifs[i]) {
2999 *p_rif_index = i;
3000 return 0;
3001 }
3002 }
4724ba56 3003
de5ed99e 3004 return -ENOBUFS;
4724ba56
IS
3005}
3006
e4f3c1c1
IS
3007static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
3008 u16 vr_id,
3009 struct net_device *l3_dev)
4724ba56 3010{
bf95233e 3011 struct mlxsw_sp_rif *rif;
4724ba56 3012
e4f3c1c1 3013 rif = kzalloc(rif_size, GFP_KERNEL);
bf95233e 3014 if (!rif)
4724ba56
IS
3015 return NULL;
3016
bf95233e
AS
3017 INIT_LIST_HEAD(&rif->nexthop_list);
3018 INIT_LIST_HEAD(&rif->neigh_list);
3019 ether_addr_copy(rif->addr, l3_dev->dev_addr);
3020 rif->mtu = l3_dev->mtu;
3021 rif->vr_id = vr_id;
3022 rif->dev = l3_dev;
3023 rif->rif_index = rif_index;
4724ba56 3024
bf95233e 3025 return rif;
4724ba56
IS
3026}
3027
5f9efffb
IS
3028struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
3029 u16 rif_index)
3030{
3031 return mlxsw_sp->router->rifs[rif_index];
3032}
3033
fd1b9d41
AS
3034u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
3035{
3036 return rif->rif_index;
3037}
3038
3039int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
3040{
3041 return rif->dev->ifindex;
3042}
3043
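/* Common RIF creation path: derive the RIF type from the netdev, get
 * the virtual router for its FIB table, allocate a RIF index and a
 * type-specific structure, configure the RIF via its ops and install an
 * FDB entry for the netdev's MAC address.
 */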
4724ba56 3044static struct mlxsw_sp_rif *
e4f3c1c1
IS
3045mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
3046 const struct mlxsw_sp_rif_params *params)
4724ba56 3047{
e4f3c1c1
IS
3048 u32 tb_id = l3mdev_fib_table(params->dev);
3049 const struct mlxsw_sp_rif_ops *ops;
3050 enum mlxsw_sp_rif_type type;
bf95233e 3051 struct mlxsw_sp_rif *rif;
a1107487
IS
3052 struct mlxsw_sp_fid *fid;
3053 struct mlxsw_sp_vr *vr;
3054 u16 rif_index;
4724ba56
IS
3055 int err;
3056
e4f3c1c1
IS
3057 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
3058 ops = mlxsw_sp->router->rif_ops_arr[type];
3059
c9ec53f0
IS
3060 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
3061 if (IS_ERR(vr))
3062 return ERR_CAST(vr);
3063
de5ed99e
IS
3064 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
3065 if (err)
3066 goto err_rif_index_alloc;
4724ba56 3067
e4f3c1c1 3068 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
a13a594d
IS
3069 if (!rif) {
3070 err = -ENOMEM;
3071 goto err_rif_alloc;
3072 }
e4f3c1c1
IS
3073 rif->mlxsw_sp = mlxsw_sp;
3074 rif->ops = ops;
a13a594d 3075
e4f3c1c1
IS
3076 fid = ops->fid_get(rif);
3077 if (IS_ERR(fid)) {
3078 err = PTR_ERR(fid);
3079 goto err_fid_get;
4d93ceeb 3080 }
e4f3c1c1 3081 rif->fid = fid;
4d93ceeb 3082
e4f3c1c1
IS
3083 if (ops->setup)
3084 ops->setup(rif, params);
3085
3086 err = ops->configure(rif);
4724ba56 3087 if (err)
e4f3c1c1 3088 goto err_configure;
4724ba56 3089
e4f3c1c1 3090 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
a1107487 3091 mlxsw_sp_fid_index(fid), true);
4724ba56
IS
3092 if (err)
3093 goto err_rif_fdb_op;
3094
e4f3c1c1 3095 mlxsw_sp_rif_counters_alloc(rif);
a1107487 3096 mlxsw_sp_fid_rif_set(fid, rif);
5f9efffb 3097 mlxsw_sp->router->rifs[rif_index] = rif;
6913229e 3098 vr->rif_count++;
4724ba56 3099
bf95233e 3100 return rif;
4724ba56 3101
4724ba56 3102err_rif_fdb_op:
e4f3c1c1
IS
3103 ops->deconfigure(rif);
3104err_configure:
a1107487
IS
3105 mlxsw_sp_fid_put(fid);
3106err_fid_get:
e4f3c1c1
IS
3107 kfree(rif);
3108err_rif_alloc:
de5ed99e 3109err_rif_index_alloc:
c9ec53f0 3110 mlxsw_sp_vr_put(vr);
4724ba56
IS
3111 return ERR_PTR(err);
3112}
3113
e4f3c1c1 3114void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
4724ba56 3115{
e4f3c1c1
IS
3116 const struct mlxsw_sp_rif_ops *ops = rif->ops;
3117 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
a1107487 3118 struct mlxsw_sp_fid *fid = rif->fid;
e4f3c1c1 3119 struct mlxsw_sp_vr *vr;
4724ba56 3120
bf95233e 3121 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
e4f3c1c1 3122 vr = &mlxsw_sp->router->vrs[rif->vr_id];
e0c0afd8 3123
6913229e 3124 vr->rif_count--;
e4f3c1c1 3125 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
a1107487 3126 mlxsw_sp_fid_rif_set(fid, NULL);
e4f3c1c1
IS
3127 mlxsw_sp_rif_counters_free(rif);
3128 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
3129 mlxsw_sp_fid_index(fid), false);
3130 ops->deconfigure(rif);
a1107487 3131 mlxsw_sp_fid_put(fid);
e4f3c1c1 3132 kfree(rif);
c9ec53f0 3133 mlxsw_sp_vr_put(vr);
4724ba56
IS
3134}
3135
e4f3c1c1
IS
3136static void
3137mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
3138 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
3139{
3140 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
3141
3142 params->vid = mlxsw_sp_port_vlan->vid;
3143 params->lag = mlxsw_sp_port->lagged;
3144 if (params->lag)
3145 params->lag_id = mlxsw_sp_port->lag_id;
3146 else
3147 params->system_port = mlxsw_sp_port->local_port;
3148}
3149
7cbecf24 3150static int
a1107487 3151mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
7cbecf24 3152 struct net_device *l3_dev)
4724ba56 3153{
7cbecf24 3154 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1b8f09a0 3155 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
7cbecf24 3156 u16 vid = mlxsw_sp_port_vlan->vid;
bf95233e 3157 struct mlxsw_sp_rif *rif;
a1107487 3158 struct mlxsw_sp_fid *fid;
03ea01e9 3159 int err;
4724ba56 3160
1b8f09a0 3161 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
bf95233e 3162 if (!rif) {
e4f3c1c1
IS
3163 struct mlxsw_sp_rif_params params = {
3164 .dev = l3_dev,
3165 };
3166
3167 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
3168 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
bf95233e
AS
3169 if (IS_ERR(rif))
3170 return PTR_ERR(rif);
4724ba56
IS
3171 }
3172
a1107487 3173 /* FID was already created, just take a reference */
e4f3c1c1 3174 fid = rif->ops->fid_get(rif);
a1107487
IS
3175 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
3176 if (err)
3177 goto err_fid_port_vid_map;
3178
7cbecf24 3179 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
03ea01e9
IS
3180 if (err)
3181 goto err_port_vid_learning_set;
3182
7cbecf24 3183 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
03ea01e9
IS
3184 BR_STATE_FORWARDING);
3185 if (err)
3186 goto err_port_vid_stp_set;
3187
a1107487 3188 mlxsw_sp_port_vlan->fid = fid;
4724ba56 3189
4724ba56 3190 return 0;
03ea01e9
IS
3191
3192err_port_vid_stp_set:
7cbecf24 3193 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
03ea01e9 3194err_port_vid_learning_set:
a1107487
IS
3195 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
3196err_fid_port_vid_map:
3197 mlxsw_sp_fid_put(fid);
03ea01e9 3198 return err;
4724ba56
IS
3199}
3200
a1107487
IS
3201void
3202mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
4724ba56 3203{
ce95e154 3204 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7cbecf24 3205 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
ce95e154 3206 u16 vid = mlxsw_sp_port_vlan->vid;
ce95e154 3207
a1107487
IS
3208 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
3209 return;
4aafc368 3210
a1107487 3211 mlxsw_sp_port_vlan->fid = NULL;
7cbecf24
IS
3212 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
3213 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
a1107487
IS
3214 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
3215 /* If router port holds the last reference on the rFID, then the
3216 * associated Sub-port RIF will be destroyed.
3217 */
3218 mlxsw_sp_fid_put(fid);
4724ba56
IS
3219}
3220
7cbecf24
IS
3221static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
3222 struct net_device *port_dev,
3223 unsigned long event, u16 vid)
4724ba56
IS
3224{
3225 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
ce95e154 3226 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4724ba56 3227
ce95e154 3228 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
7cbecf24
IS
3229 if (WARN_ON(!mlxsw_sp_port_vlan))
3230 return -EINVAL;
4724ba56
IS
3231
3232 switch (event) {
3233 case NETDEV_UP:
a1107487 3234 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
7cbecf24 3235 l3_dev);
4724ba56 3236 case NETDEV_DOWN:
a1107487 3237 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4724ba56
IS
3238 break;
3239 }
3240
3241 return 0;
3242}
3243
3244static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3245 unsigned long event)
3246{
2b94e58d
JP
3247 if (netif_is_bridge_port(port_dev) ||
3248 netif_is_lag_port(port_dev) ||
3249 netif_is_ovs_port(port_dev))
4724ba56
IS
3250 return 0;
3251
7cbecf24 3252 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
4724ba56
IS
3253}
3254
3255static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3256 struct net_device *lag_dev,
3257 unsigned long event, u16 vid)
3258{
3259 struct net_device *port_dev;
3260 struct list_head *iter;
3261 int err;
3262
3263 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3264 if (mlxsw_sp_port_dev_check(port_dev)) {
7cbecf24
IS
3265 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
3266 port_dev,
3267 event, vid);
4724ba56
IS
3268 if (err)
3269 return err;
3270 }
3271 }
3272
3273 return 0;
3274}
3275
3276static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
3277 unsigned long event)
3278{
3279 if (netif_is_bridge_port(lag_dev))
3280 return 0;
3281
3282 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
3283}
3284
4724ba56 3285static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
4724ba56
IS
3286 unsigned long event)
3287{
3288 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
e4f3c1c1
IS
3289 struct mlxsw_sp_rif_params params = {
3290 .dev = l3_dev,
3291 };
a1107487 3292 struct mlxsw_sp_rif *rif;
4724ba56
IS
3293
3294 switch (event) {
3295 case NETDEV_UP:
e4f3c1c1
IS
3296 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
3297 if (IS_ERR(rif))
3298 return PTR_ERR(rif);
3299 break;
4724ba56 3300 case NETDEV_DOWN:
a1107487 3301 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
e4f3c1c1 3302 mlxsw_sp_rif_destroy(rif);
4724ba56
IS
3303 break;
3304 }
3305
3306 return 0;
3307}
3308
3309static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3310 unsigned long event)
3311{
3312 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4724ba56
IS
3313 u16 vid = vlan_dev_vlan_id(vlan_dev);
3314
6b27c8ad
IS
3315 if (netif_is_bridge_port(vlan_dev))
3316 return 0;
3317
4724ba56 3318 if (mlxsw_sp_port_dev_check(real_dev))
7cbecf24
IS
3319 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
3320 event, vid);
4724ba56
IS
3321 else if (netif_is_lag_master(real_dev))
3322 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3323 vid);
c57529e1 3324 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
a1107487 3325 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
4724ba56
IS
3326
3327 return 0;
3328}
3329
b1e45526
IS
3330static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
3331 unsigned long event)
3332{
3333 if (mlxsw_sp_port_dev_check(dev))
3334 return mlxsw_sp_inetaddr_port_event(dev, event);
3335 else if (netif_is_lag_master(dev))
3336 return mlxsw_sp_inetaddr_lag_event(dev, event);
3337 else if (netif_is_bridge_master(dev))
a1107487 3338 return mlxsw_sp_inetaddr_bridge_event(dev, event);
b1e45526
IS
3339 else if (is_vlan_dev(dev))
3340 return mlxsw_sp_inetaddr_vlan_event(dev, event);
3341 else
3342 return 0;
3343}
3344
4724ba56
IS
3345int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3346 unsigned long event, void *ptr)
3347{
3348 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3349 struct net_device *dev = ifa->ifa_dev->dev;
3350 struct mlxsw_sp *mlxsw_sp;
bf95233e 3351 struct mlxsw_sp_rif *rif;
4724ba56
IS
3352 int err = 0;
3353
3354 mlxsw_sp = mlxsw_sp_lower_get(dev);
3355 if (!mlxsw_sp)
3356 goto out;
3357
bf95233e
AS
3358 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3359 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
4724ba56
IS
3360 goto out;
3361
b1e45526 3362 err = __mlxsw_sp_inetaddr_event(dev, event);
4724ba56
IS
3363out:
3364 return notifier_from_errno(err);
3365}
3366
bf95233e 3367static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
4724ba56
IS
3368 const char *mac, int mtu)
3369{
3370 char ritr_pl[MLXSW_REG_RITR_LEN];
3371 int err;
3372
bf95233e 3373 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
4724ba56
IS
3374 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3375 if (err)
3376 return err;
3377
3378 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3379 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3380 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3381 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3382}
3383
3384int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3385{
3386 struct mlxsw_sp *mlxsw_sp;
bf95233e 3387 struct mlxsw_sp_rif *rif;
a1107487 3388 u16 fid_index;
4724ba56
IS
3389 int err;
3390
3391 mlxsw_sp = mlxsw_sp_lower_get(dev);
3392 if (!mlxsw_sp)
3393 return 0;
3394
bf95233e
AS
3395 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3396 if (!rif)
4724ba56 3397 return 0;
a1107487 3398 fid_index = mlxsw_sp_fid_index(rif->fid);
4724ba56 3399
a1107487 3400 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
4724ba56
IS
3401 if (err)
3402 return err;
3403
bf95233e
AS
3404 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
3405 dev->mtu);
4724ba56
IS
3406 if (err)
3407 goto err_rif_edit;
3408
a1107487 3409 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
4724ba56
IS
3410 if (err)
3411 goto err_rif_fdb_op;
3412
bf95233e
AS
3413 ether_addr_copy(rif->addr, dev->dev_addr);
3414 rif->mtu = dev->mtu;
4724ba56 3415
bf95233e 3416 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
4724ba56
IS
3417
3418 return 0;
3419
3420err_rif_fdb_op:
bf95233e 3421 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
4724ba56 3422err_rif_edit:
a1107487 3423 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
4724ba56
IS
3424 return err;
3425}
3426
b1e45526
IS
3427static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
3428 struct net_device *l3_dev)
7179eb5a 3429{
b1e45526 3430 struct mlxsw_sp_rif *rif;
7179eb5a 3431
b1e45526
IS
3432 /* If netdev is already associated with a RIF, then we need to
3433 * destroy it and create a new one with the new virtual router ID.
7179eb5a 3434 */
b1e45526
IS
3435 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3436 if (rif)
3437 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
7179eb5a 3438
b1e45526 3439 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
7179eb5a
IS
3440}
3441
b1e45526
IS
3442static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3443 struct net_device *l3_dev)
7179eb5a 3444{
b1e45526 3445 struct mlxsw_sp_rif *rif;
7179eb5a 3446
b1e45526
IS
3447 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3448 if (!rif)
7179eb5a 3449 return;
b1e45526 3450 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
7179eb5a
IS
3451}
3452
b1e45526
IS
3453int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
3454 struct netdev_notifier_changeupper_info *info)
3d70e458 3455{
b1e45526
IS
3456 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3457 int err = 0;
3d70e458 3458
b1e45526
IS
3459 if (!mlxsw_sp)
3460 return 0;
3d70e458 3461
b1e45526
IS
3462 switch (event) {
3463 case NETDEV_PRECHANGEUPPER:
3464 return 0;
3465 case NETDEV_CHANGEUPPER:
3466 if (info->linking)
3467 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
3468 else
3469 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
3470 break;
3471 }
3d70e458 3472
b1e45526 3473 return err;
3d70e458
IS
3474}
3475
e4f3c1c1
IS
3476static struct mlxsw_sp_rif_subport *
3477mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
a1107487 3478{
e4f3c1c1
IS
3479 return container_of(rif, struct mlxsw_sp_rif_subport, common);
3480}
3481
3482static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
3483 const struct mlxsw_sp_rif_params *params)
3484{
3485 struct mlxsw_sp_rif_subport *rif_subport;
3486
3487 rif_subport = mlxsw_sp_rif_subport_rif(rif);
3488 rif_subport->vid = params->vid;
3489 rif_subport->lag = params->lag;
3490 if (params->lag)
3491 rif_subport->lag_id = params->lag_id;
a1107487 3492 else
e4f3c1c1
IS
3493 rif_subport->system_port = params->system_port;
3494}
3495
3496static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
3497{
3498 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3499 struct mlxsw_sp_rif_subport *rif_subport;
3500 char ritr_pl[MLXSW_REG_RITR_LEN];
3501
3502 rif_subport = mlxsw_sp_rif_subport_rif(rif);
3503 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
3504 rif->rif_index, rif->vr_id, rif->dev->mtu,
3505 rif->dev->dev_addr);
3506 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
3507 rif_subport->lag ? rif_subport->lag_id :
3508 rif_subport->system_port,
3509 rif_subport->vid);
3510
3511 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3512}
3513
3514static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
3515{
3516 return mlxsw_sp_rif_subport_op(rif, true);
a1107487
IS
3517}
3518
e4f3c1c1
IS
3519static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
3520{
3521 mlxsw_sp_rif_subport_op(rif, false);
3522}
3523
3524static struct mlxsw_sp_fid *
3525mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
3526{
3527 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
3528}
3529
3530static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
3531 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
3532 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
3533 .setup = mlxsw_sp_rif_subport_setup,
3534 .configure = mlxsw_sp_rif_subport_configure,
3535 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
3536 .fid_get = mlxsw_sp_rif_subport_fid_get,
3537};
3538
3539static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
3540 enum mlxsw_reg_ritr_if_type type,
3541 u16 vid_fid, bool enable)
3542{
3543 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3544 char ritr_pl[MLXSW_REG_RITR_LEN];
3545
3546 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
3547 rif->dev->mtu, rif->dev->dev_addr);
3548 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
3549
3550 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3551}
3552
3553static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
3554{
3555 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
3556}
3557
3558static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
3559{
3560 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3561 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
3562 int err;
3563
3564 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
3565 if (err)
3566 return err;
3567
3568 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3569 mlxsw_sp_router_port(mlxsw_sp), true);
3570 if (err)
3571 goto err_fid_bc_flood_set;
3572
3573 return 0;
3574
3575err_fid_bc_flood_set:
3576 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
3577 return err;
3578}
3579
3580static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
3581{
3582 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3583 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
3584
3585 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3586 mlxsw_sp_router_port(mlxsw_sp), false);
3587 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
3588}
3589
3590static struct mlxsw_sp_fid *
3591mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
3592{
3593 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
3594
3595 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
3596}
3597
3598static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
3599 .type = MLXSW_SP_RIF_TYPE_VLAN,
3600 .rif_size = sizeof(struct mlxsw_sp_rif),
3601 .configure = mlxsw_sp_rif_vlan_configure,
3602 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
3603 .fid_get = mlxsw_sp_rif_vlan_fid_get,
3604};
3605
3606static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
3607{
3608 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3609 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
3610 int err;
3611
3612 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
3613 true);
3614 if (err)
3615 return err;
3616
3617 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3618 mlxsw_sp_router_port(mlxsw_sp), true);
3619 if (err)
3620 goto err_fid_bc_flood_set;
3621
3622 return 0;
3623
3624err_fid_bc_flood_set:
3625 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
3626 return err;
3627}
3628
3629static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
3630{
3631 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
3632 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
3633
3634 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
3635 mlxsw_sp_router_port(mlxsw_sp), false);
3636 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
3637}
3638
3639static struct mlxsw_sp_fid *
3640mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
3641{
3642 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
3643}
3644
3645static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
3646 .type = MLXSW_SP_RIF_TYPE_FID,
3647 .rif_size = sizeof(struct mlxsw_sp_rif),
3648 .configure = mlxsw_sp_rif_fid_configure,
3649 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
3650 .fid_get = mlxsw_sp_rif_fid_fid_get,
3651};
3652
3653static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
3654 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
3655 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
3656 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
3657};
3658
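/*
 * Illustrative sketch only, not part of the upstream driver: the
 * mlxsw_sp_rif_ops_arr[] above is an ops table keyed by an enum, so the
 * generic RIF code dispatches through function pointers instead of
 * switching on the interface type.  A stand-alone user-space analogue
 * (all names below are invented for the example):
 */
#include <stdio.h>

enum obj_type { OBJ_TYPE_A, OBJ_TYPE_B };

struct obj_ops {
	enum obj_type type;
	int (*configure)(void);		/* program the backing object */
	void (*deconfigure)(void);	/* undo configure() */
};

static int a_configure(void)    { puts("configure A"); return 0; }
static void a_deconfigure(void) { puts("deconfigure A"); }
static int b_configure(void)    { puts("configure B"); return 0; }
static void b_deconfigure(void) { puts("deconfigure B"); }

static const struct obj_ops obj_a_ops = {
	.type		= OBJ_TYPE_A,
	.configure	= a_configure,
	.deconfigure	= a_deconfigure,
};

static const struct obj_ops obj_b_ops = {
	.type		= OBJ_TYPE_B,
	.configure	= b_configure,
	.deconfigure	= b_deconfigure,
};

static const struct obj_ops *obj_ops_arr[] = {
	[OBJ_TYPE_A] = &obj_a_ops,
	[OBJ_TYPE_B] = &obj_b_ops,
};

int main(void)
{
	const struct obj_ops *ops = obj_ops_arr[OBJ_TYPE_B];

	if (!ops->configure())
		ops->deconfigure();
	return 0;
}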
348b8fc3
IS
3659static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
3660{
3661 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
3662
3663 mlxsw_sp->router->rifs = kcalloc(max_rifs,
3664 sizeof(struct mlxsw_sp_rif *),
3665 GFP_KERNEL);
3666 if (!mlxsw_sp->router->rifs)
3667 return -ENOMEM;
e4f3c1c1
IS
3668
3669 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
3670
348b8fc3
IS
3671 return 0;
3672}
3673
3674static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
3675{
3676 int i;
3677
3678 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3679 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
3680
3681 kfree(mlxsw_sp->router->rifs);
3682}
3683
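/*
 * Illustrative sketch only, not part of the upstream driver:
 * mlxsw_sp_rifs_init()/mlxsw_sp_rifs_fini() above allocate a zeroed table
 * of RIF pointers sized from a device resource and, on teardown, warn if
 * any slot is still in use before freeing the table.  A user-space
 * analogue of that bookkeeping (the table size here is arbitrary):
 */
#include <assert.h>
#include <stdlib.h>

struct rif;			/* opaque per-interface object */

int main(void)
{
	const size_t max_rifs = 8;	/* stand-in for the MAX_RIFS resource */
	struct rif **rifs;
	size_t i;

	rifs = calloc(max_rifs, sizeof(*rifs));	/* kcalloc() analogue */
	if (!rifs)
		return 1;

	/* slots would be claimed and released while the router is running */

	for (i = 0; i < max_rifs; i++)
		assert(!rifs[i]);	/* WARN_ON_ONCE() analogue: every slot released */

	free(rifs);
	return 0;
}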
c3852ef7
IS
3684static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3685{
7e39d115 3686 struct mlxsw_sp_router *router;
c3852ef7
IS
3687
3688 /* Flush pending FIB notifications and then flush the device's
3689 * table before requesting another dump. The FIB notification
3690 * block is unregistered, so no need to take RTNL.
3691 */
3692 mlxsw_core_flush_owq();
7e39d115
IS
3693 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
3694 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
c3852ef7
IS
3695}
3696
4724ba56
IS
3697static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3698{
3699 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3700 u64 max_rifs;
3701 int err;
3702
3703 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
3704 return -EIO;
4724ba56 3705 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
4724ba56
IS
3706
3707 mlxsw_reg_rgcr_pack(rgcr_pl, true);
3708 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
3709 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3710 if (err)
348b8fc3 3711 return err;
4724ba56 3712 return 0;
4724ba56
IS
3713}
3714
3715static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3716{
3717 char rgcr_pl[MLXSW_REG_RGCR_LEN];
4724ba56
IS
3718
3719 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3720 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
4724ba56
IS
3721}
3722
b45f64d1
JP
3723int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3724{
9011b677 3725 struct mlxsw_sp_router *router;
b45f64d1
JP
3726 int err;
3727
9011b677
IS
3728 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
3729 if (!router)
3730 return -ENOMEM;
3731 mlxsw_sp->router = router;
3732 router->mlxsw_sp = mlxsw_sp;
3733
3734 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
b45f64d1
JP
3735 err = __mlxsw_sp_router_init(mlxsw_sp);
3736 if (err)
9011b677 3737 goto err_router_init;
b45f64d1 3738
348b8fc3
IS
3739 err = mlxsw_sp_rifs_init(mlxsw_sp);
3740 if (err)
3741 goto err_rifs_init;
3742
9011b677 3743 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
c53b8e1b
IS
3744 &mlxsw_sp_nexthop_ht_params);
3745 if (err)
3746 goto err_nexthop_ht_init;
3747
9011b677 3748 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
e9ad5e7d
IS
3749 &mlxsw_sp_nexthop_group_ht_params);
3750 if (err)
3751 goto err_nexthop_group_ht_init;
3752
8494ab06
IS
3753 err = mlxsw_sp_lpm_init(mlxsw_sp);
3754 if (err)
3755 goto err_lpm_init;
3756
b45f64d1
JP
3757 err = mlxsw_sp_vrs_init(mlxsw_sp);
3758 if (err)
3759 goto err_vrs_init;
3760
8c9583a8 3761 err = mlxsw_sp_neigh_init(mlxsw_sp);
b45f64d1
JP
3762 if (err)
3763 goto err_neigh_init;
3764
7e39d115
IS
3765 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
3766 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
c3852ef7
IS
3767 mlxsw_sp_router_fib_dump_flush);
3768 if (err)
3769 goto err_register_fib_notifier;
3770
b45f64d1
JP
3771 return 0;
3772
c3852ef7
IS
3773err_register_fib_notifier:
3774 mlxsw_sp_neigh_fini(mlxsw_sp);
b45f64d1
JP
3775err_neigh_init:
3776 mlxsw_sp_vrs_fini(mlxsw_sp);
3777err_vrs_init:
8494ab06
IS
3778 mlxsw_sp_lpm_fini(mlxsw_sp);
3779err_lpm_init:
9011b677 3780 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
e9ad5e7d 3781err_nexthop_group_ht_init:
9011b677 3782 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
c53b8e1b 3783err_nexthop_ht_init:
348b8fc3
IS
3784 mlxsw_sp_rifs_fini(mlxsw_sp);
3785err_rifs_init:
b45f64d1 3786 __mlxsw_sp_router_fini(mlxsw_sp);
9011b677
IS
3787err_router_init:
3788 kfree(mlxsw_sp->router);
b45f64d1
JP
3789 return err;
3790}
3791
3792void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3793{
7e39d115 3794 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
b45f64d1
JP
3795 mlxsw_sp_neigh_fini(mlxsw_sp);
3796 mlxsw_sp_vrs_fini(mlxsw_sp);
8494ab06 3797 mlxsw_sp_lpm_fini(mlxsw_sp);
9011b677
IS
3798 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
3799 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
348b8fc3 3800 mlxsw_sp_rifs_fini(mlxsw_sp);
b45f64d1 3801 __mlxsw_sp_router_fini(mlxsw_sp);
9011b677 3802 kfree(mlxsw_sp->router);
b45f64d1 3803}