1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
39#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
42#include <linux/notifier.h>
43#include <net/netevent.h>
44#include <net/neighbour.h>
45#include <net/arp.h>
46#include <net/ip_fib.h>
47
48#include "spectrum.h"
49#include "core.h"
50#include "reg.h"
51
52#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
53 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
54
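/* The helpers below manipulate the per-tree prefix usage bitmap, which has
 * one bit per possible prefix length. It is used to pick an LPM tree whose
 * structure covers every prefix length currently present in a FIB.
 */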
55static bool
56mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
57 struct mlxsw_sp_prefix_usage *prefix_usage2)
58{
59 unsigned char prefix;
60
61 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
62 if (!test_bit(prefix, prefix_usage2->b))
63 return false;
64 }
65 return true;
66}
67
68static bool
69mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
70 struct mlxsw_sp_prefix_usage *prefix_usage2)
71{
72 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
73}
74
75static bool
76mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
77{
78 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
79
80 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
81}
82
83static void
84mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
85 struct mlxsw_sp_prefix_usage *prefix_usage2)
86{
87 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
88}
89
90static void
91mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
92{
93 memset(prefix_usage, 0, sizeof(*prefix_usage));
94}
95
96static void
97mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
98 unsigned char prefix_len)
99{
100 set_bit(prefix_len, prefix_usage->b);
101}
102
103static void
104mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
105 unsigned char prefix_len)
106{
107 clear_bit(prefix_len, prefix_usage->b);
108}
109
110struct mlxsw_sp_fib_key {
111 struct net_device *dev;
112 unsigned char addr[sizeof(struct in6_addr)];
113 unsigned char prefix_len;
114};
115
116enum mlxsw_sp_fib_entry_type {
117 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
118 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
119 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
120};
121
122struct mlxsw_sp_nexthop_group;
123
124struct mlxsw_sp_fib_entry {
125 struct rhash_head ht_node;
126 struct list_head list;
127 struct mlxsw_sp_fib_key key;
128 enum mlxsw_sp_fib_entry_type type;
129 unsigned int ref_count;
130 struct mlxsw_sp_vr *vr;
131 struct list_head nexthop_group_node;
132 struct mlxsw_sp_nexthop_group *nh_group;
133 bool offloaded;
134};
135
136struct mlxsw_sp_fib {
137 struct rhashtable ht;
138 struct list_head entry_list;
139 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
140 struct mlxsw_sp_prefix_usage prefix_usage;
141};
142
143static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
144 .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
145 .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
146 .key_len = sizeof(struct mlxsw_sp_fib_key),
147 .automatic_shrinking = true,
148};
149
150static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
151 struct mlxsw_sp_fib_entry *fib_entry)
152{
153 unsigned char prefix_len = fib_entry->key.prefix_len;
154 int err;
155
156 err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
157 mlxsw_sp_fib_ht_params);
158 if (err)
159 return err;
160 list_add_tail(&fib_entry->list, &fib->entry_list);
161 if (fib->prefix_ref_count[prefix_len]++ == 0)
162 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
163 return 0;
164}
165
166static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
167 struct mlxsw_sp_fib_entry *fib_entry)
168{
169 unsigned char prefix_len = fib_entry->key.prefix_len;
170
171 if (--fib->prefix_ref_count[prefix_len] == 0)
172 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
173 list_del(&fib_entry->list);
174 rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
175 mlxsw_sp_fib_ht_params);
176}
177
178static struct mlxsw_sp_fib_entry *
179mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
180 size_t addr_len, unsigned char prefix_len,
181 struct net_device *dev)
182{
183 struct mlxsw_sp_fib_entry *fib_entry;
184
185 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
186 if (!fib_entry)
187 return NULL;
188 fib_entry->key.dev = dev;
189 memcpy(fib_entry->key.addr, addr, addr_len);
190 fib_entry->key.prefix_len = prefix_len;
191 return fib_entry;
192}
193
194static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
195{
196 kfree(fib_entry);
197}
198
199static struct mlxsw_sp_fib_entry *
200mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
201 size_t addr_len, unsigned char prefix_len,
202 struct net_device *dev)
203{
204 struct mlxsw_sp_fib_key key;
205
206 memset(&key, 0, sizeof(key));
207 key.dev = dev;
208 memcpy(key.addr, addr, addr_len);
209 key.prefix_len = prefix_len;
210 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
211}
212
213static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
214{
215 struct mlxsw_sp_fib *fib;
216 int err;
217
218 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
219 if (!fib)
220 return ERR_PTR(-ENOMEM);
221 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
222 if (err)
223 goto err_rhashtable_init;
224 INIT_LIST_HEAD(&fib->entry_list);
225 return fib;
226
227err_rhashtable_init:
228 kfree(fib);
229 return ERR_PTR(err);
230}
231
232static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
233{
234 rhashtable_destroy(&fib->ht);
235 kfree(fib);
236}
237
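/* Find an unused LPM tree. When 'one_reserved' is set, the first free tree
 * is skipped, presumably so that one tree is always left available for a
 * later tree replacement (see mlxsw_sp_vr_lpm_tree_check()).
 */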
238static struct mlxsw_sp_lpm_tree *
239mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
240{
241 static struct mlxsw_sp_lpm_tree *lpm_tree;
242 int i;
243
244 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
245 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
246 if (lpm_tree->ref_count == 0) {
247 if (one_reserved)
248 one_reserved = false;
249 else
250 return lpm_tree;
251 }
252 }
253 return NULL;
254}
255
256static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
257 struct mlxsw_sp_lpm_tree *lpm_tree)
258{
259 char ralta_pl[MLXSW_REG_RALTA_LEN];
260
261 mlxsw_reg_ralta_pack(ralta_pl, true,
262 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
263 lpm_tree->id);
264 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
265}
266
267static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
268 struct mlxsw_sp_lpm_tree *lpm_tree)
269{
270 char ralta_pl[MLXSW_REG_RALTA_LEN];
271
272 mlxsw_reg_ralta_pack(ralta_pl, false,
273 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
274 lpm_tree->id);
275 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
276}
277
278static int
279mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
280 struct mlxsw_sp_prefix_usage *prefix_usage,
281 struct mlxsw_sp_lpm_tree *lpm_tree)
282{
283 char ralst_pl[MLXSW_REG_RALST_LEN];
284 u8 root_bin = 0;
285 u8 prefix;
286 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
287
288 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
289 root_bin = prefix;
290
291 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
292 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
293 if (prefix == 0)
294 continue;
295 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
296 MLXSW_REG_RALST_BIN_NO_CHILD);
297 last_prefix = prefix;
298 }
299 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
300}
301
302static struct mlxsw_sp_lpm_tree *
303mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
304 struct mlxsw_sp_prefix_usage *prefix_usage,
305 enum mlxsw_sp_l3proto proto, bool one_reserved)
306{
307 struct mlxsw_sp_lpm_tree *lpm_tree;
308 int err;
309
310 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
311 if (!lpm_tree)
312 return ERR_PTR(-EBUSY);
313 lpm_tree->proto = proto;
314 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
315 if (err)
316 return ERR_PTR(err);
317
318 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
319 lpm_tree);
320 if (err)
321 goto err_left_struct_set;
322 memcpy(&lpm_tree->prefix_usage, prefix_usage,
323 sizeof(lpm_tree->prefix_usage));
324 return lpm_tree;
325
326err_left_struct_set:
327 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
328 return ERR_PTR(err);
329}
330
331static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
332 struct mlxsw_sp_lpm_tree *lpm_tree)
333{
334 return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
335}
336
337static struct mlxsw_sp_lpm_tree *
338mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
339 struct mlxsw_sp_prefix_usage *prefix_usage,
340 enum mlxsw_sp_l3proto proto, bool one_reserved)
341{
342 struct mlxsw_sp_lpm_tree *lpm_tree;
343 int i;
344
345 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
346 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
347 if (lpm_tree->ref_count != 0 &&
348 lpm_tree->proto == proto &&
349 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
350 prefix_usage))
351 goto inc_ref_count;
352 }
353 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
354 proto, one_reserved);
355 if (IS_ERR(lpm_tree))
356 return lpm_tree;
357
358inc_ref_count:
359 lpm_tree->ref_count++;
360 return lpm_tree;
361}
362
363static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
364 struct mlxsw_sp_lpm_tree *lpm_tree)
365{
366 if (--lpm_tree->ref_count == 0)
367 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
368 return 0;
369}
370
371static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
372{
373 struct mlxsw_sp_lpm_tree *lpm_tree;
374 int i;
375
376 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
377 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
378 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
379 }
380}
381
382static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
383{
384 struct mlxsw_sp_vr *vr;
385 int i;
386
387 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
388 vr = &mlxsw_sp->router.vrs[i];
389 if (!vr->used)
390 return vr;
391 }
392 return NULL;
393}
394
395static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
396 struct mlxsw_sp_vr *vr)
397{
398 char raltb_pl[MLXSW_REG_RALTB_LEN];
399
400 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
401 (enum mlxsw_reg_ralxx_protocol) vr->proto,
402 vr->lpm_tree->id);
403 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
404}
405
406static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
407 struct mlxsw_sp_vr *vr)
408{
409 char raltb_pl[MLXSW_REG_RALTB_LEN];
410
411 /* Bind to tree 0 which is default */
412 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
413 (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
414 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
415}
416
417static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
418{
419 /* For our purpose, squash main and local table into one */
420 if (tb_id == RT_TABLE_LOCAL)
421 tb_id = RT_TABLE_MAIN;
422 return tb_id;
423}
424
425static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
426 u32 tb_id,
427 enum mlxsw_sp_l3proto proto)
428{
429 struct mlxsw_sp_vr *vr;
430 int i;
431
432 tb_id = mlxsw_sp_fix_tb_id(tb_id);
433
434 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
435 vr = &mlxsw_sp->router.vrs[i];
436 if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
437 return vr;
438 }
439 return NULL;
440}
441
442static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
443 unsigned char prefix_len,
444 u32 tb_id,
445 enum mlxsw_sp_l3proto proto)
446{
447 struct mlxsw_sp_prefix_usage req_prefix_usage;
448 struct mlxsw_sp_lpm_tree *lpm_tree;
449 struct mlxsw_sp_vr *vr;
450 int err;
451
452 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
453 if (!vr)
454 return ERR_PTR(-EBUSY);
455 vr->fib = mlxsw_sp_fib_create();
456 if (IS_ERR(vr->fib))
457 return ERR_CAST(vr->fib);
458
459 vr->proto = proto;
460 vr->tb_id = tb_id;
461 mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
462 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
463 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
464 proto, true);
465 if (IS_ERR(lpm_tree)) {
466 err = PTR_ERR(lpm_tree);
467 goto err_tree_get;
468 }
469 vr->lpm_tree = lpm_tree;
470 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
471 if (err)
472 goto err_tree_bind;
473
474 vr->used = true;
475 return vr;
476
477err_tree_bind:
478 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
479err_tree_get:
480 mlxsw_sp_fib_destroy(vr->fib);
481
482 return ERR_PTR(err);
483}
484
485static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
486 struct mlxsw_sp_vr *vr)
487{
488 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
489 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
490 mlxsw_sp_fib_destroy(vr->fib);
491 vr->used = false;
492}
493
494static int
495mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
496 struct mlxsw_sp_prefix_usage *req_prefix_usage)
497{
498 struct mlxsw_sp_lpm_tree *lpm_tree;
499
500 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
501 &vr->lpm_tree->prefix_usage))
502 return 0;
503
504 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
505 vr->proto, false);
506 if (IS_ERR(lpm_tree)) {
507 /* We failed to get a tree according to the required
508 * prefix usage. However, the current tree might be still good
509 * for us if our requirement is subset of the prefixes used
510 * in the tree.
511 */
512 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
513 &vr->lpm_tree->prefix_usage))
514 return 0;
515 return PTR_ERR(lpm_tree);
516 }
517
518 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
519 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
520 vr->lpm_tree = lpm_tree;
521 return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
522}
523
524static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
525 unsigned char prefix_len,
526 u32 tb_id,
527 enum mlxsw_sp_l3proto proto)
528{
529 struct mlxsw_sp_vr *vr;
530 int err;
531
532 tb_id = mlxsw_sp_fix_tb_id(tb_id);
533 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
534 if (!vr) {
535 vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
536 if (IS_ERR(vr))
537 return vr;
538 } else {
539 struct mlxsw_sp_prefix_usage req_prefix_usage;
540
541 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
542 &vr->fib->prefix_usage);
543 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
544 /* Need to replace LPM tree in case new prefix is required. */
545 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
546 &req_prefix_usage);
547 if (err)
548 return ERR_PTR(err);
549 }
550 return vr;
551}
552
553static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
554{
555 /* Destroy the virtual router entity if the associated FIB is empty,
556 * allowing it to be reused for other tables in the future. Otherwise,
557 * check whether some prefix usage disappeared and switch trees if
558 * that is the case. Note that if a new, smaller tree cannot be
559 * allocated, the original one is kept in use.
560 */
561 if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
562 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
563 else
564 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
565 &vr->fib->prefix_usage);
566}
567
568static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
569{
570 struct mlxsw_sp_vr *vr;
571 u64 max_vrs;
572 int i;
573
574 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
575 return -EIO;
576
577 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
578 mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
579 GFP_KERNEL);
580 if (!mlxsw_sp->router.vrs)
581 return -ENOMEM;
582
583 for (i = 0; i < max_vrs; i++) {
584 vr = &mlxsw_sp->router.vrs[i];
585 vr->id = i;
586 }
587
588 return 0;
589}
590
591static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
592
593static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
594{
595 /* At this stage we're guaranteed not to have new incoming
596 * FIB notifications and the work queue is free from FIBs
597 * sitting on top of mlxsw netdevs. However, we can still
598 * have other FIBs queued. Flush the queue before flushing
599 * the device's tables. No need for locks, as we're the only
600 * writer.
601 */
602 mlxsw_core_flush_owq();
603 mlxsw_sp_router_fib_flush(mlxsw_sp);
604 kfree(mlxsw_sp->router.vrs);
605}
606
607struct mlxsw_sp_neigh_key {
608 struct neighbour *n;
609};
610
611struct mlxsw_sp_neigh_entry {
612 struct rhash_head ht_node;
613 struct mlxsw_sp_neigh_key key;
614 u16 rif;
615 bool connected;
616 unsigned char ha[ETH_ALEN];
617 struct list_head nexthop_list; /* list of nexthops using
618 * this neigh entry
619 */
620 struct list_head nexthop_neighs_list_node;
621};
622
623static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
624 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
625 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
626 .key_len = sizeof(struct mlxsw_sp_neigh_key),
627};
628
629static struct mlxsw_sp_neigh_entry *
630mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
631 u16 rif)
632{
633 struct mlxsw_sp_neigh_entry *neigh_entry;
634
635 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
636 if (!neigh_entry)
637 return NULL;
638
639 neigh_entry->key.n = n;
640 neigh_entry->rif = rif;
641 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
642
643 return neigh_entry;
644}
645
646static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
647{
648 kfree(neigh_entry);
649}
650
651static int
652mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
653 struct mlxsw_sp_neigh_entry *neigh_entry)
654{
655 return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
656 &neigh_entry->ht_node,
657 mlxsw_sp_neigh_ht_params);
658}
659
660static void
661mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
662 struct mlxsw_sp_neigh_entry *neigh_entry)
663{
664 rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
665 &neigh_entry->ht_node,
666 mlxsw_sp_neigh_ht_params);
667}
668
669static struct mlxsw_sp_neigh_entry *
670mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
671{
672 struct mlxsw_sp_neigh_entry *neigh_entry;
673 struct mlxsw_sp_rif *r;
674 int err;
675
676 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
677 if (!r)
678 return ERR_PTR(-EINVAL);
679
680 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
681 if (!neigh_entry)
682 return ERR_PTR(-ENOMEM);
683
684 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
685 if (err)
686 goto err_neigh_entry_insert;
687
688 return neigh_entry;
689
690err_neigh_entry_insert:
691 mlxsw_sp_neigh_entry_free(neigh_entry);
692 return ERR_PTR(err);
693}
694
695static void
696mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
697 struct mlxsw_sp_neigh_entry *neigh_entry)
698{
699 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
700 mlxsw_sp_neigh_entry_free(neigh_entry);
701}
702
703static struct mlxsw_sp_neigh_entry *
704mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
705{
706 struct mlxsw_sp_neigh_key key;
707
708 key.n = n;
709 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
710 &key, mlxsw_sp_neigh_ht_params);
711}
712
713static void
714mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
715{
716 unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
717
718 mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
719}
720
721static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
722 char *rauhtd_pl,
723 int ent_index)
724{
725 struct net_device *dev;
726 struct neighbour *n;
727 __be32 dipn;
728 u32 dip;
729 u16 rif;
730
731 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
732
733 if (!mlxsw_sp->rifs[rif]) {
734 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
735 return;
736 }
737
738 dipn = htonl(dip);
739 dev = mlxsw_sp->rifs[rif]->dev;
740 n = neigh_lookup(&arp_tbl, &dipn, dev);
741 if (!n) {
742 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
743 &dip);
744 return;
745 }
746
747 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
748 neigh_event_send(n, NULL);
749 neigh_release(n);
750}
751
752static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
753 char *rauhtd_pl,
754 int rec_index)
755{
756 u8 num_entries;
757 int i;
758
759 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
760 rec_index);
761 /* Hardware starts counting at 0, so add 1. */
762 num_entries++;
763
764 /* Each record consists of several neighbour entries. */
765 for (i = 0; i < num_entries; i++) {
766 int ent_index;
767
768 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
769 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
770 ent_index);
771 }
772
773}
774
775static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
776 char *rauhtd_pl, int rec_index)
777{
778 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
779 case MLXSW_REG_RAUHTD_TYPE_IPV4:
780 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
781 rec_index);
782 break;
783 case MLXSW_REG_RAUHTD_TYPE_IPV6:
784 WARN_ON_ONCE(1);
785 break;
786 }
787}
788
789static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
790{
791 u8 num_rec, last_rec_index, num_entries;
792
793 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
794 last_rec_index = num_rec - 1;
795
796 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
797 return false;
798 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
799 MLXSW_REG_RAUHTD_TYPE_IPV6)
800 return true;
801
802 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
803 last_rec_index);
804 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
805 return true;
806 return false;
807}
808
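/* Dump the device's active neighbour entries via the RAUHTD register and
 * feed each one back to the kernel with neigh_event_send(), so the kernel
 * sees activity for neighbours whose traffic is handled in hardware.
 */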
809static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
810{
811 char *rauhtd_pl;
812 u8 num_rec;
813 int i, err;
814
815 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
816 if (!rauhtd_pl)
817 return -ENOMEM;
818
819 /* Make sure the neighbour's netdev isn't removed in the
820 * process.
821 */
822 rtnl_lock();
823 do {
824 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
825 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
826 rauhtd_pl);
827 if (err) {
828 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
829 break;
830 }
831 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
832 for (i = 0; i < num_rec; i++)
833 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
834 i);
835 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
836 rtnl_unlock();
837
838 kfree(rauhtd_pl);
839 return err;
840}
841
842static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
843{
844 struct mlxsw_sp_neigh_entry *neigh_entry;
845
846 /* Take RTNL mutex here to prevent lists from changes */
847 rtnl_lock();
848 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
849 nexthop_neighs_list_node)
850 /* If this neigh has nexthops, make the kernel think this neigh
851 * is active regardless of the traffic.
852 */
853 neigh_event_send(neigh_entry->key.n, NULL);
854 rtnl_unlock();
855}
856
857static void
858mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
859{
860 unsigned long interval = mlxsw_sp->router.neighs_update.interval;
861
862 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
863 msecs_to_jiffies(interval));
864}
865
866static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
867{
868 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
869 router.neighs_update.dw.work);
870 int err;
871
872 err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
873 if (err)
874 dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
875
876 mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
877
878 mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
879}
880
881static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
882{
883 struct mlxsw_sp_neigh_entry *neigh_entry;
884 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
885 router.nexthop_probe_dw.work);
886
887 /* Iterate over the nexthop neighbours, find the unresolved ones and
888 * send ARP to them. This solves a chicken-and-egg problem: the
889 * nexthop won't be offloaded until the neighbour is resolved, but
890 * the neighbour will never be resolved if traffic keeps flowing in
891 * HW via a different nexthop.
892 *
893 * Take RTNL mutex here to prevent lists from changes.
894 */
895 rtnl_lock();
896 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
897 nexthop_neighs_list_node)
898 if (!neigh_entry->connected)
899 neigh_event_send(neigh_entry->key.n, NULL);
900 rtnl_unlock();
901
902 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
903 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
904}
905
906static void
907mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
908 struct mlxsw_sp_neigh_entry *neigh_entry,
909 bool removing);
910
911static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
912{
913 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
914 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
915}
916
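/* Add or delete the neighbour's IPv4 entry in the device's unicast host
 * table (RAUHT register), using the RIF and MAC cached in neigh_entry.
 */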
917static void
918mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
919 struct mlxsw_sp_neigh_entry *neigh_entry,
920 enum mlxsw_reg_rauht_op op)
921{
922 struct neighbour *n = neigh_entry->key.n;
923 u32 dip = ntohl(*((__be32 *) n->primary_key));
924 char rauht_pl[MLXSW_REG_RAUHT_LEN];
925
926 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
927 dip);
928 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
929}
930
931static void
932mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
933 struct mlxsw_sp_neigh_entry *neigh_entry,
934 bool adding)
935{
936 if (!adding && !neigh_entry->connected)
937 return;
938 neigh_entry->connected = adding;
939 if (neigh_entry->key.n->tbl == &arp_tbl)
940 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
941 mlxsw_sp_rauht_op(adding));
942 else
943 WARN_ON_ONCE(1);
944}
945
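/* Neighbour notifications arrive in atomic context, so the netevent handler
 * only queues this work item; the actual hardware update is done from
 * process context in mlxsw_sp_router_neigh_event_work() under RTNL.
 */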
946struct mlxsw_sp_neigh_event_work {
947 struct work_struct work;
948 struct mlxsw_sp *mlxsw_sp;
949 struct neighbour *n;
950};
951
952static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
953{
954 struct mlxsw_sp_neigh_event_work *neigh_work =
955 container_of(work, struct mlxsw_sp_neigh_event_work, work);
956 struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
957 struct mlxsw_sp_neigh_entry *neigh_entry;
958 struct neighbour *n = neigh_work->n;
959 unsigned char ha[ETH_ALEN];
960 bool entry_connected;
961 u8 nud_state, dead;
962
963 /* If these parameters are changed after we release the lock,
964 * then we are guaranteed to receive another event letting us
965 * know about it.
966 */
967 read_lock_bh(&n->lock);
968 memcpy(ha, n->ha, ETH_ALEN);
969 nud_state = n->nud_state;
970 dead = n->dead;
971 read_unlock_bh(&n->lock);
972
973 rtnl_lock();
974 entry_connected = nud_state & NUD_VALID && !dead;
975 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
976 if (!entry_connected && !neigh_entry)
977 goto out;
978 if (!neigh_entry) {
979 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
980 if (IS_ERR(neigh_entry))
981 goto out;
982 }
983
984 memcpy(neigh_entry->ha, ha, ETH_ALEN);
985 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
986 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
987
988 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
989 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
990
991out:
992 rtnl_unlock();
993 neigh_release(n);
994 kfree(neigh_work);
995}
996
997int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
998 unsigned long event, void *ptr)
999{
1000 struct mlxsw_sp_neigh_event_work *neigh_work;
1001 struct mlxsw_sp_port *mlxsw_sp_port;
1002 struct mlxsw_sp *mlxsw_sp;
1003 unsigned long interval;
1004 struct neigh_parms *p;
1005 struct neighbour *n;
1006
1007 switch (event) {
1008 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
1009 p = ptr;
1010
1011 /* We don't care about changes in the default table. */
1012 if (!p->dev || p->tbl != &arp_tbl)
1013 return NOTIFY_DONE;
1014
1015 /* We are in atomic context and can't take RTNL mutex,
1016 * so use RCU variant to walk the device chain.
1017 */
1018 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
1019 if (!mlxsw_sp_port)
1020 return NOTIFY_DONE;
1021
1022 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1023 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
1024 mlxsw_sp->router.neighs_update.interval = interval;
1025
1026 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1027 break;
1028 case NETEVENT_NEIGH_UPDATE:
1029 n = ptr;
1030
1031 if (n->tbl != &arp_tbl)
1032 return NOTIFY_DONE;
1033
1034 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
1035 if (!mlxsw_sp_port)
1036 return NOTIFY_DONE;
1037
1038 neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
1039 if (!neigh_work) {
1040 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1041 return NOTIFY_BAD;
1042 }
1043
1044 INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
1045 neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1046 neigh_work->n = n;
1047
1048 /* Take a reference to ensure the neighbour won't be
1049 * destructed until we drop the reference in delayed
1050 * work.
1051 */
1052 neigh_clone(n);
1053 mlxsw_core_schedule_work(&neigh_work->work);
1054 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1055 break;
1056 }
1057
1058 return NOTIFY_DONE;
1059}
1060
1061static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1062{
1063 int err;
1064
1065 err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
1066 &mlxsw_sp_neigh_ht_params);
1067 if (err)
1068 return err;
1069
1070 /* Initialize the polling interval according to the default
1071 * table.
1072 */
1073 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1074
1075 /* Create the delayed works for the activity_update */
1076 INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
1077 mlxsw_sp_router_neighs_update_work);
1078 INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
1079 mlxsw_sp_router_probe_unresolved_nexthops);
1080 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
1081 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
1082 return 0;
1083}
1084
1085static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1086{
1087 cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
1088 cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
1089 rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
1090}
1091
1092struct mlxsw_sp_nexthop_key {
1093 struct fib_nh *fib_nh;
1094};
1095
1096struct mlxsw_sp_nexthop {
1097 struct list_head neigh_list_node; /* member of neigh entry list */
1098 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1099 * this belongs to
1100 */
1101 struct rhash_head ht_node;
1102 struct mlxsw_sp_nexthop_key key;
1103 struct mlxsw_sp_rif *r;
1104 u8 should_offload:1, /* set indicates this neigh is connected and
1105 * should be put to KVD linear area of this group.
1106 */
1107 offloaded:1, /* set in case the neigh is actually put into
1108 * KVD linear area of this group.
1109 */
1110 update:1; /* set indicates that MAC of this neigh should be
1111 * updated in HW
1112 */
1113 struct mlxsw_sp_neigh_entry *neigh_entry;
1114};
1115
1116struct mlxsw_sp_nexthop_group_key {
1117 struct fib_info *fi;
1118};
1119
1120struct mlxsw_sp_nexthop_group {
1121 struct rhash_head ht_node;
1122 struct list_head fib_list; /* list of fib entries that use this group */
1123 struct mlxsw_sp_nexthop_group_key key;
1124 u8 adj_index_valid:1,
1125 gateway:1; /* routes using the group use a gateway */
1126 u32 adj_index;
1127 u16 ecmp_size;
1128 u16 count;
1129 struct mlxsw_sp_nexthop nexthops[0];
1130#define nh_rif nexthops[0].r
1131};
1132
1133static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
1134 .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
1135 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
1136 .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
1137};
1138
1139static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
1140 struct mlxsw_sp_nexthop_group *nh_grp)
1141{
1142 return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
1143 &nh_grp->ht_node,
1144 mlxsw_sp_nexthop_group_ht_params);
1145}
1146
1147static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
1148 struct mlxsw_sp_nexthop_group *nh_grp)
1149{
1150 rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
1151 &nh_grp->ht_node,
1152 mlxsw_sp_nexthop_group_ht_params);
1153}
1154
1155static struct mlxsw_sp_nexthop_group *
1156mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
1157 struct mlxsw_sp_nexthop_group_key key)
1158{
1159 return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
1160 mlxsw_sp_nexthop_group_ht_params);
1161}
1162
1163static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
1164 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
1165 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
1166 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
1167};
1168
1169static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
1170 struct mlxsw_sp_nexthop *nh)
1171{
1172 return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
1173 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
1174}
1175
1176static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
1177 struct mlxsw_sp_nexthop *nh)
1178{
1179 rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
1180 mlxsw_sp_nexthop_ht_params);
1181}
1182
1183static struct mlxsw_sp_nexthop *
1184mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1185 struct mlxsw_sp_nexthop_key key)
1186{
1187 return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
1188 mlxsw_sp_nexthop_ht_params);
1189}
1190
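/* When a nexthop group moves to a new adjacency index or ECMP size, the
 * RALEU register is used to rewrite, per virtual router, all routes that
 * still point at the old index.
 */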
1191static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
1192 struct mlxsw_sp_vr *vr,
1193 u32 adj_index, u16 ecmp_size,
1194 u32 new_adj_index,
1195 u16 new_ecmp_size)
1196{
1197 char raleu_pl[MLXSW_REG_RALEU_LEN];
1198
1199 mlxsw_reg_raleu_pack(raleu_pl,
1200 (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
1201 adj_index, ecmp_size, new_adj_index,
1202 new_ecmp_size);
1203 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1204}
1205
1206static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1207 struct mlxsw_sp_nexthop_group *nh_grp,
1208 u32 old_adj_index, u16 old_ecmp_size)
1209{
1210 struct mlxsw_sp_fib_entry *fib_entry;
1211 struct mlxsw_sp_vr *vr = NULL;
1212 int err;
1213
1214 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1215 if (vr == fib_entry->vr)
1216 continue;
1217 vr = fib_entry->vr;
1218 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
1219 old_adj_index,
1220 old_ecmp_size,
1221 nh_grp->adj_index,
1222 nh_grp->ecmp_size);
1223 if (err)
1224 return err;
1225 }
1226 return 0;
1227}
1228
1229static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1230 struct mlxsw_sp_nexthop *nh)
1231{
1232 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1233 char ratr_pl[MLXSW_REG_RATR_LEN];
1234
1235 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1236 true, adj_index, neigh_entry->rif);
1237 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1238 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1239}
1240
1241static int
1242mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1243 struct mlxsw_sp_nexthop_group *nh_grp,
1244 bool reallocate)
1245{
1246 u32 adj_index = nh_grp->adj_index; /* base */
1247 struct mlxsw_sp_nexthop *nh;
1248 int i;
1249 int err;
1250
1251 for (i = 0; i < nh_grp->count; i++) {
1252 nh = &nh_grp->nexthops[i];
1253
1254 if (!nh->should_offload) {
1255 nh->offloaded = 0;
1256 continue;
1257 }
1258
1259 if (nh->update || reallocate) {
1260 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1261 adj_index, nh);
1262 if (err)
1263 return err;
1264 nh->update = 0;
1265 nh->offloaded = 1;
1266 }
1267 adj_index++;
1268 }
1269 return 0;
1270}
1271
1272static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1273 struct mlxsw_sp_fib_entry *fib_entry);
1274
1275static int
1276mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1277 struct mlxsw_sp_nexthop_group *nh_grp)
1278{
1279 struct mlxsw_sp_fib_entry *fib_entry;
1280 int err;
1281
1282 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1283 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1284 if (err)
1285 return err;
1286 }
1287 return 0;
1288}
1289
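/* Recompute the group's adjacency entries: count the nexthops that can be
 * offloaded, allocate a fresh KVD linear area of that size if the set
 * changed, update the nexthop MACs and then re-point the FIB entries. On
 * any failure the routes are trapped to the CPU instead.
 */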
1290static void
1291mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1292 struct mlxsw_sp_nexthop_group *nh_grp)
1293{
1294 struct mlxsw_sp_nexthop *nh;
1295 bool offload_change = false;
1296 u32 adj_index;
1297 u16 ecmp_size = 0;
1298 bool old_adj_index_valid;
1299 u32 old_adj_index;
1300 u16 old_ecmp_size;
1301 int ret;
1302 int i;
1303 int err;
1304
1305 if (!nh_grp->gateway) {
1306 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1307 return;
1308 }
1309
1310 for (i = 0; i < nh_grp->count; i++) {
1311 nh = &nh_grp->nexthops[i];
1312
1313 if (nh->should_offload ^ nh->offloaded) {
1314 offload_change = true;
1315 if (nh->should_offload)
1316 nh->update = 1;
1317 }
1318 if (nh->should_offload)
1319 ecmp_size++;
1320 }
1321 if (!offload_change) {
1322 /* Nothing was added or removed, so no need to reallocate. Just
1323 * update MAC on existing adjacency indexes.
1324 */
1325 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1326 false);
1327 if (err) {
1328 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1329 goto set_trap;
1330 }
1331 return;
1332 }
1333 if (!ecmp_size)
1334 /* No neigh of this group is connected so we just set
1335 * the trap and let everything flow through the kernel.
1336 */
1337 goto set_trap;
1338
1339 ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
1340 if (ret < 0) {
1341 /* We ran out of KVD linear space, just set the
1342 * trap and let everything flow through kernel.
1343 */
1344 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1345 goto set_trap;
1346 }
1347 adj_index = ret;
1348 old_adj_index_valid = nh_grp->adj_index_valid;
1349 old_adj_index = nh_grp->adj_index;
1350 old_ecmp_size = nh_grp->ecmp_size;
1351 nh_grp->adj_index_valid = 1;
1352 nh_grp->adj_index = adj_index;
1353 nh_grp->ecmp_size = ecmp_size;
1354 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
1355 if (err) {
1356 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1357 goto set_trap;
1358 }
1359
1360 if (!old_adj_index_valid) {
1361 /* The trap was set for fib entries, so we have to call
1362 * fib entry update to unset it and use adjacency index.
1363 */
1364 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1365 if (err) {
1366 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1367 goto set_trap;
1368 }
1369 return;
1370 }
1371
1372 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1373 old_adj_index, old_ecmp_size);
1374 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1375 if (err) {
1376 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1377 goto set_trap;
1378 }
1379 return;
1380
1381set_trap:
1382 old_adj_index_valid = nh_grp->adj_index_valid;
1383 nh_grp->adj_index_valid = 0;
1384 for (i = 0; i < nh_grp->count; i++) {
1385 nh = &nh_grp->nexthops[i];
1386 nh->offloaded = 0;
1387 }
1388 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1389 if (err)
1390 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1391 if (old_adj_index_valid)
1392 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1393}
1394
1395static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1396 bool removing)
1397{
1398 if (!removing && !nh->should_offload)
1399 nh->should_offload = 1;
1400 else if (removing && nh->offloaded)
1401 nh->should_offload = 0;
1402 nh->update = 1;
1403}
1404
1405static void
1406mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1407 struct mlxsw_sp_neigh_entry *neigh_entry,
1408 bool removing)
1409{
1410 struct mlxsw_sp_nexthop *nh;
1411
1412 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1413 neigh_list_node) {
1414 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1415 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1416 }
1417}
1418
1419static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
1420 struct mlxsw_sp_nexthop *nh)
1421{
1422 struct mlxsw_sp_neigh_entry *neigh_entry;
1423 struct fib_nh *fib_nh = nh->key.fib_nh;
1424 struct neighbour *n;
1425 u8 nud_state, dead;
1426 int err;
1427
1428 if (!nh->nh_grp->gateway || nh->neigh_entry)
1429 return 0;
1430
1431 /* Take a reference on the neigh here to ensure that it will
1432 * not be destroyed before the nexthop entry is finished.
1433 * The reference is taken either in neigh_lookup() or
1434 * in neigh_create() in case n is not found.
1435 */
1436 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
1437 if (!n) {
1438 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
1439 if (IS_ERR(n))
1440 return PTR_ERR(n);
1441 neigh_event_send(n, NULL);
1442 }
1443 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1444 if (!neigh_entry) {
1445 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1446 if (IS_ERR(neigh_entry)) {
1447 err = -EINVAL;
1448 goto err_neigh_entry_create;
1449 }
1450 }
1451
1452 /* If that is the first nexthop connected to that neigh, add to
1453 * nexthop_neighs_list
1454 */
1455 if (list_empty(&neigh_entry->nexthop_list))
1456 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
1457 &mlxsw_sp->router.nexthop_neighs_list);
1458
1459 nh->neigh_entry = neigh_entry;
1460 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1461 read_lock_bh(&n->lock);
1462 nud_state = n->nud_state;
1463 dead = n->dead;
1464 read_unlock_bh(&n->lock);
1465 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
1466
1467 return 0;
1468
1469err_neigh_entry_create:
1470 neigh_release(n);
1471 return err;
1472}
1473
1474static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
1475 struct mlxsw_sp_nexthop *nh)
1476{
1477 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1478 struct neighbour *n;
1479
1480 if (!neigh_entry)
1481 return;
1482 n = neigh_entry->key.n;
1483
1484 __mlxsw_sp_nexthop_neigh_update(nh, true);
1485 list_del(&nh->neigh_list_node);
1486 nh->neigh_entry = NULL;
1487
1488 /* If that is the last nexthop connected to that neigh, remove from
1489 * nexthop_neighs_list
1490 */
1491 if (list_empty(&neigh_entry->nexthop_list))
1492 list_del(&neigh_entry->nexthop_neighs_list_node);
1493
1494 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1495 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1496
1497 neigh_release(n);
1498}
1499
1500static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1501 struct mlxsw_sp_nexthop_group *nh_grp,
1502 struct mlxsw_sp_nexthop *nh,
1503 struct fib_nh *fib_nh)
1504{
1505 struct net_device *dev = fib_nh->nh_dev;
1506 struct mlxsw_sp_rif *r;
1507 int err;
1508
1509 nh->nh_grp = nh_grp;
1510 nh->key.fib_nh = fib_nh;
1511 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1512 if (err)
1513 return err;
1514
1515 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1516 if (!r)
1517 return 0;
1518 nh->r = r;
1519
1520 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1521 if (err)
1522 goto err_nexthop_neigh_init;
1523
1524 return 0;
1525
1526err_nexthop_neigh_init:
1527 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1528 return err;
1529}
1530
1531static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1532 struct mlxsw_sp_nexthop *nh)
1533{
1534 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1535 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1536}
1537
1538static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1539 unsigned long event, struct fib_nh *fib_nh)
1540{
1541 struct mlxsw_sp_nexthop_key key;
1542 struct mlxsw_sp_nexthop *nh;
1543 struct mlxsw_sp_rif *r;
1544
1545 if (mlxsw_sp->router.aborted)
1546 return;
1547
1548 key.fib_nh = fib_nh;
1549 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
1550 if (WARN_ON_ONCE(!nh))
1551 return;
1552
1553 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
1554 if (!r)
1555 return;
1556
1557 switch (event) {
1558 case FIB_EVENT_NH_ADD:
1559 nh->r = r;
1560 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1561 break;
1562 case FIB_EVENT_NH_DEL:
1563 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1564 nh->r = NULL;
1565 break;
1566 }
1567
1568 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1569}
1570
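/* A nexthop group mirrors a kernel fib_info: it is allocated with one
 * mlxsw_sp_nexthop per fib_nh, marked as a gateway group when the first
 * nexthop's scope is RT_SCOPE_LINK, and inserted into a hash table keyed
 * by the fib_info pointer.
 */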
1571static struct mlxsw_sp_nexthop_group *
1572mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1573{
1574 struct mlxsw_sp_nexthop_group *nh_grp;
1575 struct mlxsw_sp_nexthop *nh;
1576 struct fib_nh *fib_nh;
1577 size_t alloc_size;
1578 int i;
1579 int err;
1580
1581 alloc_size = sizeof(*nh_grp) +
1582 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1583 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1584 if (!nh_grp)
1585 return ERR_PTR(-ENOMEM);
1586 INIT_LIST_HEAD(&nh_grp->fib_list);
1587 nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
1588 nh_grp->count = fi->fib_nhs;
1589 nh_grp->key.fi = fi;
1590 for (i = 0; i < nh_grp->count; i++) {
1591 nh = &nh_grp->nexthops[i];
1592 fib_nh = &fi->fib_nh[i];
1593 err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1594 if (err)
1595 goto err_nexthop_init;
1596 }
1597 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
1598 if (err)
1599 goto err_nexthop_group_insert;
1600 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1601 return nh_grp;
1602
1603err_nexthop_group_insert:
1604err_nexthop_init:
1605 for (i--; i >= 0; i--)
1606 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1607 kfree(nh_grp);
1608 return ERR_PTR(err);
1609}
1610
1611static void
1612mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1613 struct mlxsw_sp_nexthop_group *nh_grp)
1614{
1615 struct mlxsw_sp_nexthop *nh;
1616 int i;
1617
1618 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
1619 for (i = 0; i < nh_grp->count; i++) {
1620 nh = &nh_grp->nexthops[i];
1621 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1622 }
1623 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1624 WARN_ON_ONCE(nh_grp->adj_index_valid);
1625 kfree(nh_grp);
1626}
1627
1628static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1629 struct mlxsw_sp_fib_entry *fib_entry,
1630 struct fib_info *fi)
1631{
1632 struct mlxsw_sp_nexthop_group_key key;
1633 struct mlxsw_sp_nexthop_group *nh_grp;
1634
1635 key.fi = fi;
1636 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
1637 if (!nh_grp) {
1638 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1639 if (IS_ERR(nh_grp))
1640 return PTR_ERR(nh_grp);
1641 }
1642 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1643 fib_entry->nh_group = nh_grp;
1644 return 0;
1645}
1646
1647static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1648 struct mlxsw_sp_fib_entry *fib_entry)
1649{
1650 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1651
1652 list_del(&fib_entry->nexthop_group_node);
1653 if (!list_empty(&nh_grp->fib_list))
1654 return;
1655 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1656}
1657
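/* A remote (gateway) entry can only be offloaded once its nexthop group
 * has a valid adjacency index; a local entry only needs the group to have
 * a RIF. Trap entries are never reported as offloaded here.
 */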
1658static bool
1659mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1660{
1661 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1662
1663 switch (fib_entry->type) {
1664 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1665 return !!nh_group->adj_index_valid;
1666 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
1667 return !!nh_group->nh_rif;
1668 default:
1669 return false;
1670 }
1671}
1672
1673static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
1674{
1675 fib_entry->offloaded = true;
1676
1677 switch (fib_entry->vr->proto) {
1678 case MLXSW_SP_L3_PROTO_IPV4:
1679 fib_info_offload_inc(fib_entry->nh_group->key.fi);
1680 break;
1681 case MLXSW_SP_L3_PROTO_IPV6:
1682 WARN_ON_ONCE(1);
1683 }
1684}
1685
1686static void
1687mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
1688{
1689 switch (fib_entry->vr->proto) {
1690 case MLXSW_SP_L3_PROTO_IPV4:
1691 fib_info_offload_dec(fib_entry->nh_group->key.fi);
1692 break;
1693 case MLXSW_SP_L3_PROTO_IPV6:
1694 WARN_ON_ONCE(1);
1695 }
1696
1697 fib_entry->offloaded = false;
1698}
1699
1700static void
1701mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
1702 enum mlxsw_reg_ralue_op op, int err)
1703{
1704 switch (op) {
1705 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
1706 if (!fib_entry->offloaded)
1707 return;
1708 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
1709 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
1710 if (err)
1711 return;
1712 if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1713 !fib_entry->offloaded)
1714 mlxsw_sp_fib_entry_offload_set(fib_entry);
1715 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1716 fib_entry->offloaded)
1717 mlxsw_sp_fib_entry_offload_unset(fib_entry);
1718 return;
1719 default:
1720 return;
1721 }
1722}
1723
1724static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
1725 struct mlxsw_sp_fib_entry *fib_entry,
1726 enum mlxsw_reg_ralue_op op)
1727{
1728 char ralue_pl[MLXSW_REG_RALUE_LEN];
1729 u32 *p_dip = (u32 *) fib_entry->key.addr;
1730 struct mlxsw_sp_vr *vr = fib_entry->vr;
1731 enum mlxsw_reg_ralue_trap_action trap_action;
1732 u16 trap_id = 0;
1733 u32 adjacency_index = 0;
1734 u16 ecmp_size = 0;
1735
1736 /* In case the nexthop group adjacency index is valid, use it
1737 * with the provided ECMP size. Otherwise, set up a trap and pass
1738 * the traffic to the kernel.
1739 */
1740 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
1741 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
1742 adjacency_index = fib_entry->nh_group->adj_index;
1743 ecmp_size = fib_entry->nh_group->ecmp_size;
1744 } else {
1745 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
1746 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
1747 }
1748
1749 mlxsw_reg_ralue_pack4(ralue_pl,
1750 (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1751 vr->id, fib_entry->key.prefix_len, *p_dip);
1752 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
1753 adjacency_index, ecmp_size);
1754 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1755}
1756
1757static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
1758 struct mlxsw_sp_fib_entry *fib_entry,
1759 enum mlxsw_reg_ralue_op op)
1760{
1761 struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
1762 enum mlxsw_reg_ralue_trap_action trap_action;
1763 char ralue_pl[MLXSW_REG_RALUE_LEN];
1764 u32 *p_dip = (u32 *) fib_entry->key.addr;
1765 struct mlxsw_sp_vr *vr = fib_entry->vr;
1766 u16 trap_id = 0;
1767 u16 rif = 0;
1768
1769 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
1770 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
1771 rif = r->rif;
1772 } else {
1773 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
1774 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
1775 }
61c503f9 1776
1777 mlxsw_reg_ralue_pack4(ralue_pl,
1778 (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1779 vr->id, fib_entry->key.prefix_len, *p_dip);
70ad3506 1780 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
1781 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1782}
1783
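/* Write a route whose packets are always trapped to the CPU. */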
1784static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
1785 struct mlxsw_sp_fib_entry *fib_entry,
1786 enum mlxsw_reg_ralue_op op)
1787{
1788 char ralue_pl[MLXSW_REG_RALUE_LEN];
1789 u32 *p_dip = (u32 *) fib_entry->key.addr;
1790 struct mlxsw_sp_vr *vr = fib_entry->vr;
1791
1792 mlxsw_reg_ralue_pack4(ralue_pl,
1793 (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
1794 vr->id, fib_entry->key.prefix_len, *p_dip);
1795 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
1796 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1797}
1798
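/* Dispatch the IPv4 route operation according to the entry type. */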
1799static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
1800 struct mlxsw_sp_fib_entry *fib_entry,
1801 enum mlxsw_reg_ralue_op op)
1802{
1803 switch (fib_entry->type) {
1804 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
a7ff87ac 1805 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
1806 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
1807 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
1808 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
1809 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
1810 }
1811 return -EINVAL;
1812}
1813
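/* Perform the requested operation on the FIB entry. Only IPv4 is
 * currently implemented; the offload indication is refreshed
 * according to the outcome.
 */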
1814static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
1815 struct mlxsw_sp_fib_entry *fib_entry,
1816 enum mlxsw_reg_ralue_op op)
1817{
1818 int err = -EINVAL;
1819
1820 switch (fib_entry->vr->proto) {
1821 case MLXSW_SP_L3_PROTO_IPV4:
1822 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
1823 break;
61c503f9 1824 case MLXSW_SP_L3_PROTO_IPV6:
013b20f9 1825 return err;
61c503f9 1826 }
1827 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
1828 return err;
1829}
1830
1831static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1832 struct mlxsw_sp_fib_entry *fib_entry)
1833{
1834 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1835 MLXSW_REG_RALUE_OP_WRITE_WRITE);
1836}
1837
1838static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
1839 struct mlxsw_sp_fib_entry *fib_entry)
1840{
1841 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1842 MLXSW_REG_RALUE_OP_WRITE_DELETE);
1843}
1844
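/* Classify the route: local and broadcast routes are trapped to the
 * CPU, unicast routes via a gateway (nexthop scope RT_SCOPE_LINK) are
 * programmed as remote entries, and directly connected unicast routes
 * as local entries.
 */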
61c503f9 1845static int
1846mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
1847 const struct fib_entry_notifier_info *fen_info,
1848 struct mlxsw_sp_fib_entry *fib_entry)
61c503f9 1849{
b45f64d1 1850 struct fib_info *fi = fen_info->fi;
61c503f9 1851
b45f64d1 1852 if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
1853 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1854 return 0;
1855 }
b45f64d1 1856 if (fen_info->type != RTN_UNICAST)
61c503f9 1857 return -EINVAL;
b8399a1e 1858 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
61c503f9 1859 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
b8399a1e 1860 else
b45f64d1 1861 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
b45f64d1 1862 return 0;
1863}
1864
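/* Return the FIB entry matching the notified route: take a reference
 * on an existing entry, or create a new one along with its type and
 * nexthop group.
 */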
1865static struct mlxsw_sp_fib_entry *
1866mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
b45f64d1 1867 const struct fib_entry_notifier_info *fen_info)
61c503f9 1868{
61c503f9 1869 struct mlxsw_sp_fib_entry *fib_entry;
b45f64d1 1870 struct fib_info *fi = fen_info->fi;
1871 struct mlxsw_sp_vr *vr;
1872 int err;
1873
b45f64d1 1874 vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
1875 MLXSW_SP_L3_PROTO_IPV4);
1876 if (IS_ERR(vr))
5b004412 1877 return ERR_CAST(vr);
61c503f9 1878
1879 fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
1880 sizeof(fen_info->dst),
1881 fen_info->dst_len, fi->fib_dev);
1882 if (fib_entry) {
1883 /* Already exists, just take a reference */
1884 fib_entry->ref_count++;
1885 return fib_entry;
1886 }
1887 fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
1888 sizeof(fen_info->dst),
1889 fen_info->dst_len, fi->fib_dev);
1890 if (!fib_entry) {
1891 err = -ENOMEM;
1892 goto err_fib_entry_create;
1893 }
1894 fib_entry->vr = vr;
5b004412 1895 fib_entry->ref_count = 1;
61c503f9 1896
013b20f9 1897 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
61c503f9 1898 if (err)
013b20f9 1899 goto err_fib4_entry_type_set;
61c503f9 1900
1901 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
1902 if (err)
1903 goto err_nexthop_group_get;
1904
1905 return fib_entry;
1906
b8399a1e 1907err_nexthop_group_get:
013b20f9 1908err_fib4_entry_type_set:
1909 mlxsw_sp_fib_entry_destroy(fib_entry);
1910err_fib_entry_create:
1911 mlxsw_sp_vr_put(mlxsw_sp, vr);
1912
1913 return ERR_PTR(err);
1914}
1915
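/* Look up an existing FIB entry matching the notified route, if any. */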
1916static struct mlxsw_sp_fib_entry *
1917mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
b45f64d1 1918 const struct fib_entry_notifier_info *fen_info)
1919{
1920 struct mlxsw_sp_vr *vr;
1921
1922 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
1923 MLXSW_SP_L3_PROTO_IPV4);
1924 if (!vr)
1925 return NULL;
1926
1927 return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
1928 sizeof(fen_info->dst),
1929 fen_info->dst_len,
1930 fen_info->fi->fib_dev);
1931}
1932
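/* Drop a reference on the FIB entry and destroy it when the last
 * reference is released.
 */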
1933static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
1934 struct mlxsw_sp_fib_entry *fib_entry)
1935{
1936 struct mlxsw_sp_vr *vr = fib_entry->vr;
1937
1938 if (--fib_entry->ref_count == 0) {
b8399a1e 1939 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
1940 mlxsw_sp_fib_entry_destroy(fib_entry);
1941 }
1942 mlxsw_sp_vr_put(mlxsw_sp, vr);
1943}
1944
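/* Release all references held on the FIB entry; used when the FIB is
 * flushed.
 */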
1945static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
1946 struct mlxsw_sp_fib_entry *fib_entry)
5b004412 1947{
b45f64d1 1948 unsigned int last_ref_count;
5b004412 1949
1950 do {
1951 last_ref_count = fib_entry->ref_count;
1952 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
1953 } while (last_ref_count != 1);
1954}
1955
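/* Add a route to the device. Called from the FIB event work item with
 * RTNL held.
 */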
1956static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
1957 struct fib_entry_notifier_info *fen_info)
61c503f9 1958{
1959 struct mlxsw_sp_fib_entry *fib_entry;
1960 struct mlxsw_sp_vr *vr;
1961 int err;
1962
1963 if (mlxsw_sp->router.aborted)
1964 return 0;
1965
1966 fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
1967 if (IS_ERR(fib_entry)) {
1968 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
1969 return PTR_ERR(fib_entry);
1970 }
61c503f9 1971
1972 if (fib_entry->ref_count != 1)
1973 return 0;
1974
61c503f9 1975 vr = fib_entry->vr;
5b004412 1976 err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
1977 if (err) {
1978 dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
61c503f9 1979 goto err_fib_entry_insert;
1980 }
1981 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1982 if (err)
1983 goto err_fib_entry_add;
1984 return 0;
1985
1986err_fib_entry_add:
1987 mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
1988err_fib_entry_insert:
5b004412 1989 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
1990 return err;
1991}
1992
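/* Remove a route from the device and drop the reference taken when it
 * was added.
 */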
1993static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
1994 struct fib_entry_notifier_info *fen_info)
61c503f9 1995{
61c503f9 1996 struct mlxsw_sp_fib_entry *fib_entry;
61c503f9 1997
b45f64d1 1998 if (mlxsw_sp->router.aborted)
37956d78 1999 return;
2000
2001 fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
2002 if (!fib_entry)
2003 return;
2004
2005 if (fib_entry->ref_count == 1) {
2006 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2007 mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
2008 }
2009
2010 mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
61c503f9 2011}
2012
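/* After an abort, bind virtual router 0 to the minimal LPM tree and
 * install a default route that traps all packets to the CPU.
 */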
2013static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
2014{
2015 char ralta_pl[MLXSW_REG_RALTA_LEN];
2016 char ralst_pl[MLXSW_REG_RALST_LEN];
2017 char raltb_pl[MLXSW_REG_RALTB_LEN];
2018 char ralue_pl[MLXSW_REG_RALUE_LEN];
2019 int err;
2020
2021 mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
2022 MLXSW_SP_LPM_TREE_MIN);
2023 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
2024 if (err)
2025 return err;
2026
2027 mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
2028 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
2029 if (err)
2030 return err;
2031
2032 mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
2033 MLXSW_SP_LPM_TREE_MIN);
2034 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
2035 if (err)
2036 return err;
2037
2038 mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
2039 MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
2040 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2041 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2042}
2043
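/* Remove every FIB entry from the device and drop all references
 * taken on them.
 */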
ac571de9 2044static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
b45f64d1 2045{
2046 struct mlxsw_sp_fib_entry *fib_entry;
2047 struct mlxsw_sp_fib_entry *tmp;
2048 struct mlxsw_sp_vr *vr;
2049 int i;
b45f64d1 2050
c1a38311 2051 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
b45f64d1 2052 vr = &mlxsw_sp->router.vrs[i];
ac571de9 2053
2054 if (!vr->used)
2055 continue;
2056
2057 list_for_each_entry_safe(fib_entry, tmp,
2058 &vr->fib->entry_list, list) {
2059 bool do_break = &tmp->list == &vr->fib->entry_list;
2060
2061 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2062 mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
2063 fib_entry);
2064 mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
2065 if (do_break)
2066 break;
2067 }
2068 }
2069}
2070
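/* Stop offloading routes: flush the device's FIB and fall back to
 * trapping all routed traffic to the kernel.
 */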
2071static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
2072{
2073 int err;
2074
2075 if (mlxsw_sp->router.aborted)
2076 return;
2077 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
ac571de9 2078 mlxsw_sp_router_fib_flush(mlxsw_sp);
2079 mlxsw_sp->router.aborted = true;
2080 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
2081 if (err)
2082 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
2083}
2084
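/* Allocate the RIF array according to the MAX_RIFS resource and
 * enable the router in hardware.
 */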
2085static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
2086{
b45f64d1 2087 char rgcr_pl[MLXSW_REG_RGCR_LEN];
c1a38311 2088 u64 max_rifs;
2089 int err;
2090
c1a38311 2091 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
2092 return -EIO;
2093
2094 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2095 mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
2096 GFP_KERNEL);
2097 if (!mlxsw_sp->rifs)
2098 return -ENOMEM;
2099
2100 mlxsw_reg_rgcr_pack(rgcr_pl, true);
c1a38311 2101 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
2102 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
2103 if (err)
2104 goto err_rgcr_fail;
2105
2106 return 0;
2107
2108err_rgcr_fail:
2109 kfree(mlxsw_sp->rifs);
2110 return err;
2111}
2112
2113static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
2114{
2115 char rgcr_pl[MLXSW_REG_RGCR_LEN];
2116 int i;
2117
2118 mlxsw_reg_rgcr_pack(rgcr_pl, false);
2119 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
2120
c1a38311 2121 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2122 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
2123
2124 kfree(mlxsw_sp->rifs);
2125}
2126
3057224e 2127struct mlxsw_sp_fib_event_work {
a0e4761d 2128 struct work_struct work;
2129 union {
2130 struct fib_entry_notifier_info fen_info;
2131 struct fib_nh_notifier_info fnh_info;
2132 };
2133 struct mlxsw_sp *mlxsw_sp;
2134 unsigned long event;
2135};
2136
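/* Process a FIB event that was deferred from atomic context. Runs
 * under RTNL; a failure to add a route triggers the abort mechanism.
 */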
2137static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
b45f64d1 2138{
3057224e 2139 struct mlxsw_sp_fib_event_work *fib_work =
a0e4761d 2140 container_of(work, struct mlxsw_sp_fib_event_work, work);
3057224e 2141 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
2142 int err;
2143
2144 /* Protect internal structures from changes */
2145 rtnl_lock();
2146 switch (fib_work->event) {
b45f64d1 2147 case FIB_EVENT_ENTRY_ADD:
3057224e 2148 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info);
2149 if (err)
2150 mlxsw_sp_router_fib4_abort(mlxsw_sp);
3057224e 2151 fib_info_put(fib_work->fen_info.fi);
2152 break;
2153 case FIB_EVENT_ENTRY_DEL:
2154 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
2155 fib_info_put(fib_work->fen_info.fi);
2156 break;
2157 case FIB_EVENT_RULE_ADD: /* fall through */
2158 case FIB_EVENT_RULE_DEL:
2159 mlxsw_sp_router_fib4_abort(mlxsw_sp);
2160 break;
2161 case FIB_EVENT_NH_ADD: /* fall through */
2162 case FIB_EVENT_NH_DEL:
2163 mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
2164 fib_work->fnh_info.fib_nh);
2165 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
2166 break;
b45f64d1 2167 }
2168 rtnl_unlock();
2169 kfree(fib_work);
2170}
2171
2172/* Called with rcu_read_lock() */
2173static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
2174 unsigned long event, void *ptr)
2175{
2176 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
2177 struct mlxsw_sp_fib_event_work *fib_work;
2178 struct fib_notifier_info *info = ptr;
2179
2180 if (!net_eq(info->net, &init_net))
2181 return NOTIFY_DONE;
2182
2183 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
2184 if (WARN_ON(!fib_work))
2185 return NOTIFY_BAD;
2186
a0e4761d 2187 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
2188 fib_work->mlxsw_sp = mlxsw_sp;
2189 fib_work->event = event;
2190
2191 switch (event) {
2192 case FIB_EVENT_ENTRY_ADD: /* fall through */
2193 case FIB_EVENT_ENTRY_DEL:
2194 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
2195	/* Take a reference on fib_info to prevent it from being
2196 * freed while work is queued. Release it afterwards.
2197 */
2198 fib_info_hold(fib_work->fen_info.fi);
2199 break;
2200 case FIB_EVENT_NH_ADD: /* fall through */
2201 case FIB_EVENT_NH_DEL:
2202 memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
2203 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
2204 break;
2205 }
2206
a0e4761d 2207 mlxsw_core_schedule_work(&fib_work->work);
3057224e 2208
2209 return NOTIFY_DONE;
2210}
2211
2212static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
2213{
2214 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
2215
2216 /* Flush pending FIB notifications and then flush the device's
2217 * table before requesting another dump. The FIB notification
2218 * block is unregistered, so no need to take RTNL.
2219 */
2220 mlxsw_core_flush_owq();
2221 mlxsw_sp_router_fib_flush(mlxsw_sp);
2222}
2223
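/* Initialize routing: router resources, nexthop hash tables, LPM
 * trees, virtual routers and neighbour handling, and finally register
 * the FIB notifier that feeds route events to the driver.
 */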
2224int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
2225{
2226 int err;
2227
2228 INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
2229 err = __mlxsw_sp_router_init(mlxsw_sp);
2230 if (err)
2231 return err;
2232
2233 err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
2234 &mlxsw_sp_nexthop_ht_params);
2235 if (err)
2236 goto err_nexthop_ht_init;
2237
2238 err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
2239 &mlxsw_sp_nexthop_group_ht_params);
2240 if (err)
2241 goto err_nexthop_group_ht_init;
2242
2243 mlxsw_sp_lpm_init(mlxsw_sp);
2244 err = mlxsw_sp_vrs_init(mlxsw_sp);
2245 if (err)
2246 goto err_vrs_init;
2247
8c9583a8 2248 err = mlxsw_sp_neigh_init(mlxsw_sp);
2249 if (err)
2250 goto err_neigh_init;
2251
2252 mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
2253 err = register_fib_notifier(&mlxsw_sp->fib_nb,
2254 mlxsw_sp_router_fib_dump_flush);
2255 if (err)
2256 goto err_register_fib_notifier;
2257
2258 return 0;
2259
2260err_register_fib_notifier:
2261 mlxsw_sp_neigh_fini(mlxsw_sp);
2262err_neigh_init:
2263 mlxsw_sp_vrs_fini(mlxsw_sp);
2264err_vrs_init:
2265 rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
2266err_nexthop_group_ht_init:
2267 rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
2268err_nexthop_ht_init:
2269 __mlxsw_sp_router_fini(mlxsw_sp);
2270 return err;
2271}
2272
2273void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
2274{
2275 unregister_fib_notifier(&mlxsw_sp->fib_nb);
2276 mlxsw_sp_neigh_fini(mlxsw_sp);
2277 mlxsw_sp_vrs_fini(mlxsw_sp);
e9ad5e7d 2278 rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
c53b8e1b 2279 rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
2280 __mlxsw_sp_router_fini(mlxsw_sp);
2281}