/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"

TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {				\
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))	\
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);	\
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)	: range = 1 - 32
 * mask   (OUT)	: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (int)0x80000000 >> (depth - 1);
}
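
/*
 * Worked example of the arithmetic-shift trick above (annotation added for
 * clarity, not part of the original source). Right-shifting the signed value
 * 0x80000000 smears the sign bit, so:
 *
 *	depth_to_mask(1)  == 0x80000000	(1 leading one)
 *	depth_to_mask(8)  == 0xFF000000	(a /8 prefix)
 *	depth_to_mask(24) == 0xFFFFFF00	(a /24 prefix)
 *	depth_to_mask(32) == 0xFFFFFFFF	(a full host route)
 *
 * This relies on arithmetic right shift of a negative int, which is
 * implementation-defined in C but holds on the compilers DPDK targets.
 */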

/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
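
/*
 * Example (annotation added for clarity, not part of the original source):
 * the "range" is the number of consecutive table entries a prefix covers.
 * A /16 covers 1 << (24 - 16) == 256 tbl24 entries, a /24 covers exactly one
 * tbl24 entry, and a /30 covers 1 << (32 - 30) == 4 tbl8 entries within its
 * group.
 */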

/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name)
{
	struct rte_lpm_v20 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);

struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
		rte_lpm_find_existing_v1604);

/*
 * Allocates memory for LPM object
 */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm_v20 *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing table with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		lpm = NULL;
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	strlcpy(lpm->name, name, sizeof(lpm->name));

	te->data = lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);

struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing table with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		lpm = NULL;
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(lpm);
		lpm = NULL;
		rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));

	te->data = lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
	struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
			const struct rte_lpm_config *config), rte_lpm_create_v1604);
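
/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * caller sizes the table up front and checks rte_errno on failure. The
 * figures below are arbitrary example values.
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm",
 *			rte_socket_id(), &config);
 *	if (lpm == NULL)
 *		rte_exit(EXIT_FAILURE, "LPM create failed: %d\n", rte_errno);
 */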

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm);
	rte_free(te);
}
VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);

void
rte_lpm_free_v1604(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	rte_free(te);
}
BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
		rte_lpm_free_v1604);

/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules
 * that apply to a specific prefix depth (i.e. group 1 contains rules that
 * apply to prefixes with a depth of 1, etc.). In the following code
 * (depth - 1) is used as the group index because, although the depth range
 * is 1 - 32, groups in the rule table are indexed from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
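/*
 * Illustration (annotation added for clarity, not in the original source):
 * with this layout, all /24 rules live in group index 23, i.e. the
 * contiguous slice of rules_tbl starting at rule_info[23].first_rule and
 * spanning rule_info[23].used_rules entries. Groups are packed back to
 * back, which is why inserting into a group below makes room by relocating
 * the first rule of each deeper group to the slot just past that group's
 * end and advancing its first_rule index.
 */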
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
	uint8_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
			+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
				lpm->rules_tbl[lpm->rule_info[i].first_rule
					+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

static inline void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
			+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry_v20 *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
			group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static inline void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};
			new_tbl24_entry.next_hop = next_hop;

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry_v20
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
					};
					new_tbl8_entry.next_hop = next_hop;

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					lpm->tbl8[j] = new_tbl8_entry;

					continue;
				}
			}
		}
	}

	return 0;
}

static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
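/*
 * Note (annotation added for clarity, not in the original source): in the
 * 16.04 table-entry layout the next hop and the tbl8 group index share the
 * same 24-bit field, so the alias below lets this function write
 * ".group_idx" for extended (tbl8-backed) entries while still touching that
 * shared field.
 */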
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					lpm->tbl8[j] = new_tbl8_entry;

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}

static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */

		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else { /*
		  * If it is valid, extended entry calculate the index into tbl8.
		  */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.valid_group = lpm->tbl8[i].valid_group,
				};
				new_tbl8_entry.next_hop = next_hop;
				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				lpm->tbl8[i] = new_tbl8_entry;

				continue;
			}
		}
	}

	return 0;
}

static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else { /*
		  * If it is valid, extended entry calculate the index into tbl8.
		  */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				lpm->tbl8[i] = new_tbl8_entry;

				continue;
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Add a route
 */
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete_v20(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);

int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete_v1604(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
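
/*
 * Usage sketch (illustrative, not part of the original source): install a
 * route and look an address up against it. The prefix and next-hop values
 * are arbitrary examples; rte_lpm_lookup() is the inline lookup defined in
 * rte_lpm.h.
 *
 *	uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8);
 *	uint32_t hop = 7, found_hop;
 *
 *	if (rte_lpm_add(lpm, ip, 24, hop) < 0)
 *		; // handle -EINVAL or -ENOSPC
 *	if (rte_lpm_lookup(lpm, ip | 42, &found_hop) == 0)
 *		; // hit: found_hop == 7 for 192.168.1.42
 */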

/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v20(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);

int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v1604(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);

static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v20(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}

static inline int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}

static inline int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A value of -1 indicates that no
	 * replacement rule exists, while a value >= 0 is the index of the
	 * replacement rule.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
		};
		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
}

static inline int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A value of -1 indicates that no
	 * replacement rule exists, while a value >= 0 is the index of the
	 * replacement rule.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl
			[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of a value >= 0 means tbl8 is in use but all its entries hold the
 * same route, and thus it can be recycled.
 */
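/*
 * Example (annotation added for clarity, not in the original source):
 * suppose a /24 rule was pushed down into a tbl8 group and the last
 * remaining /25-or-deeper rule in that group has just been deleted. Every
 * one of the group's 256 entries now carries the same depth <= 24 route,
 * so the check below returns the group's first index and the caller folds
 * the group back into a single non-extended tbl24 entry before freeing it.
 */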
static inline int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid check if the depth is 24 or
		 * less, and if so check the rest of the entries to verify
		 * that they are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid check if the depth is 24 or
		 * less, and if so check the rest of the entries to verify
		 * that they are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
		};

		new_tbl8_entry.next_hop =
				lpm->rules_tbl[sub_rule_index].next_hop;
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	}

	return 0;
}

static inline int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	}
#undef group_idx
	return 0;
}

/*
 * Deletes a rule
 */
int
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: the IP address is an unsigned 32-bit
	 * integer, so every value is valid and it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v20(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v20(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);

int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: the IP address is an unsigned 32-bit
	 * integer, so every value is valid and it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v1604(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_depth = 0;
	sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v1604(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth), rte_lpm_delete_v1604);
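
/*
 * Usage sketch (illustrative, not part of the original source): remove the
 * example /24 installed earlier and release the table when done.
 *
 *	if (rte_lpm_delete(lpm, ip, 24) < 0)
 *		; // no such rule, or bad arguments (-EINVAL)
 *	rte_lpm_free(lpm);
 */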

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);

void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
		rte_lpm_delete_all_v1604);