// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Constraints Shortest Path First algorithms - cspf.c
 *
 * Author: Olivier Dugeon <olivier.dugeon@orange.com>
 *
 * Copyright (C) 2022 Orange http://www.orange.com
 *
 * This file is part of Free Range Routing (FRR).
 */

#include <zebra.h>

#include "if.h"
#include "linklist.h"
#include "log.h"
#include "hash.h"
#include "memory.h"
#include "prefix.h"
#include "table.h"
#include "stream.h"
#include "printfrr.h"
#include "link_state.h"
#include "cspf.h"

/* Link State Memory allocation */
DEFINE_MTYPE_STATIC(LIB, PCA, "Path Computation Algorithms");

/**
 * Create new Constrained Path. Memory is dynamically allocated.
 *
 * @param key   Vertex key of the destination of this path
 *
 * @return      Pointer to a new Constrained Path structure
 */
static struct c_path *cpath_new(uint64_t key)
{
        struct c_path *path;

        /* Sanity Check */
        if (key == 0)
                return NULL;

        path = XCALLOC(MTYPE_PCA, sizeof(struct c_path));
        path->dst = key;
        path->status = IN_PROGRESS;
        path->edges = list_new();
        path->weight = MAX_COST;

        return path;
}

/**
 * Copy the src Constrained Path into the dst Constrained Path. A new
 * Constrained Path structure is dynamically allocated if dst is NULL. If src
 * is NULL, the function returns dst, regardless of whether it is NULL or not.
 *
 * @param dest  Destination Constrained Path structure
 * @param src   Source Constrained Path structure
 *
 * @return      Pointer to the destination Constrained Path structure
 */
static struct c_path *cpath_copy(struct c_path *dest, const struct c_path *src)
{
        struct c_path *new_path;

        if (!src)
                return dest;

        if (!dest) {
                new_path = XCALLOC(MTYPE_PCA, sizeof(struct c_path));
        } else {
                new_path = dest;
                if (dest->edges)
                        list_delete(&new_path->edges);
        }

        new_path->dst = src->dst;
        new_path->weight = src->weight;
        new_path->edges = list_dup(src->edges);
        new_path->status = src->status;

        return new_path;
}

/**
 * Delete a Constrained Path structure. Previously allocated memory is freed.
 *
 * @param path  Constrained Path structure to be deleted
 */
void cpath_del(struct c_path *path)
{
        if (!path)
                return;

        if (path->edges)
                list_delete(&path->edges);

        XFREE(MTYPE_PCA, path);
        path = NULL;
}

/**
 * Replace the list of edges in the next Constrained Path by the list of edges
 * of the current Constrained Path.
 *
 * @param next_path     next Constrained Path structure
 * @param cur_path      current Constrained Path structure
 */
static void cpath_replace(struct c_path *next_path, struct c_path *cur_path)
{

        if (next_path->edges)
                list_delete(&next_path->edges);

        next_path->edges = list_dup(cur_path->edges);
}

/**
 * Create a new Visited Node structure from the provided Vertex. The structure
 * is dynamically allocated.
 *
 * @param vertex        Vertex structure
 *
 * @return      Pointer to the new Visited Node structure
 */
static struct v_node *vnode_new(struct ls_vertex *vertex)
{
        struct v_node *vnode;

        if (!vertex)
                return NULL;

        vnode = XCALLOC(MTYPE_PCA, sizeof(struct v_node));
        vnode->vertex = vertex;
        vnode->key = vertex->key;

        return vnode;
}

/**
 * Delete a Visited Node structure. Previously allocated memory is freed.
 *
 * @param vnode Visited Node structure to be deleted
 */
static void vnode_del(struct v_node *vnode)
{
        if (!vnode)
                return;

        XFREE(MTYPE_PCA, vnode);
        vnode = NULL;
}

/**
 * Search a Vertex in the TED by IPv4 address. The function searches for the
 * vertex by browsing the subnets table. This allows finding a vertex not only
 * by router ID, but also by interface IPv4 address.
 *
 * @param ted   Traffic Engineering Database
 * @param ipv4  IPv4 address
 *
 * @return      Vertex if found, NULL otherwise
 */
static struct ls_vertex *get_vertex_by_ipv4(struct ls_ted *ted,
                                            struct in_addr ipv4)
{
        struct ls_subnet *subnet;
        struct prefix p;

        p.family = AF_INET;
        p.u.prefix4 = ipv4;

        frr_each (subnets, &ted->subnets, subnet) {
                if (subnet->key.family != AF_INET)
                        continue;
                p.prefixlen = subnet->key.prefixlen;
                if (prefix_same(&subnet->key, &p))
                        return subnet->vertex;
        }

        return NULL;
}

/**
 * Search a Vertex in the TED by IPv6 address. The function searches for the
 * vertex by browsing the subnets table. This allows finding a vertex not only
 * by router ID, but also by interface IPv6 address.
 *
 * @param ted   Traffic Engineering Database
 * @param ipv6  IPv6 address
 *
 * @return      Vertex if found, NULL otherwise
 */
static struct ls_vertex *get_vertex_by_ipv6(struct ls_ted *ted,
                                            struct in6_addr ipv6)
{
        struct ls_subnet *subnet;
        struct prefix p;

        p.family = AF_INET6;
        p.u.prefix6 = ipv6;

        frr_each (subnets, &ted->subnets, subnet) {
                if (subnet->key.family != AF_INET6)
                        continue;
                p.prefixlen = subnet->key.prefixlen;
                if (prefix_cmp(&subnet->key, &p) == 0)
                        return subnet->vertex;
        }

        return NULL;
}

struct cspf *cspf_new(void)
{
        struct cspf *algo;

        /* Allocate New CSPF structure */
        algo = XCALLOC(MTYPE_PCA, sizeof(struct cspf));

        /* Initialize RB-Trees */
        processed_init(&algo->processed);
        visited_init(&algo->visited);
        pqueue_init(&algo->pqueue);

        algo->path = NULL;
        algo->pdst = NULL;

        return algo;
}

struct cspf *cspf_init(struct cspf *algo, const struct ls_vertex *src,
                       const struct ls_vertex *dst, struct constraints *csts)
{
        struct cspf *new_algo;
        struct c_path *psrc;

        if (!csts)
                return NULL;

        if (!algo)
                new_algo = cspf_new();
        else
                new_algo = algo;

        /* Initialize Processed Path and Priority Queue with Src & Dst */
        if (src) {
                psrc = cpath_new(src->key);
                psrc->weight = 0;
                processed_add(&new_algo->processed, psrc);
                pqueue_add(&new_algo->pqueue, psrc);
                new_algo->path = psrc;
        }
        if (dst) {
                new_algo->pdst = cpath_new(dst->key);
                processed_add(&new_algo->processed, new_algo->pdst);
        }

        memcpy(&new_algo->csts, csts, sizeof(struct constraints));

        return new_algo;
}

struct cspf *cspf_init_v4(struct cspf *algo, struct ls_ted *ted,
                          const struct in_addr src, const struct in_addr dst,
                          struct constraints *csts)
{
        struct ls_vertex *vsrc;
        struct ls_vertex *vdst;
        struct cspf *new_algo;

        /* Sanity Check */
        if (!ted)
                return algo;

        if (!algo)
                new_algo = cspf_new();
        else
                new_algo = algo;

        /* Get Source and Destination Vertex from the TED */
        vsrc = get_vertex_by_ipv4(ted, src);
        vdst = get_vertex_by_ipv4(ted, dst);
        csts->family = AF_INET;

        return cspf_init(new_algo, vsrc, vdst, csts);
}

struct cspf *cspf_init_v6(struct cspf *algo, struct ls_ted *ted,
                          const struct in6_addr src, const struct in6_addr dst,
                          struct constraints *csts)
{
        struct ls_vertex *vsrc;
        struct ls_vertex *vdst;
        struct cspf *new_algo;

        /* Sanity Check */
        if (!ted)
                return algo;

        if (!algo)
                new_algo = cspf_new();
        else
                new_algo = algo;

        /* Get Source and Destination Vertex from the TED */
        vsrc = get_vertex_by_ipv6(ted, src);
        vdst = get_vertex_by_ipv6(ted, dst);
        csts->family = AF_INET6;

        return cspf_init(new_algo, vsrc, vdst, csts);
}

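/*
 * Illustrative usage sketch (kept as a comment, not compiled): how a caller
 * might request an IPv4 path bounded by TE metric using the functions above.
 * The TED pointer, addresses, cost value and use_path() consumer below are
 * hypothetical placeholders, not part of this API.
 *
 *      struct constraints csts = {};
 *      struct in_addr src, dst;
 *      struct cspf *algo;
 *      struct c_path *path;
 *
 *      inet_pton(AF_INET, "10.0.0.1", &src);   // hypothetical router IDs
 *      inet_pton(AF_INET, "10.0.0.2", &dst);
 *      csts.ctype = CSPF_TE_METRIC;    // optimize and bound on TE metric
 *      csts.cost = 100;                // reject paths costlier than 100
 *
 *      algo = cspf_init_v4(NULL, ted, src, dst, &csts);
 *      path = compute_p2p_path(algo, ted);
 *      if (path->status == SUCCESS)
 *              use_path(path->edges);  // hypothetical consumer of the edges
 *      cpath_del(path);
 *      cspf_del(algo);
 */
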
void cspf_clean(struct cspf *algo)
{
        struct c_path *path;
        struct v_node *vnode;

        if (!algo)
                return;

        /* Normally, the Priority Queue is empty. Clean it just in case. */
        if (pqueue_count(&algo->pqueue)) {
                frr_each_safe (pqueue, &algo->pqueue, path) {
                        pqueue_del(&algo->pqueue, path);
                }
        }

        /* Empty Processed Path tree and associated Paths */
        if (processed_count(&algo->processed)) {
                frr_each_safe (processed, &algo->processed, path) {
                        processed_del(&algo->processed, path);
                        cpath_del(path);
                }
        }

        /* Empty visited Vertex tree and associated Nodes */
        if (visited_count(&algo->visited)) {
                frr_each_safe (visited, &algo->visited, vnode) {
                        visited_del(&algo->visited, vnode);
                        vnode_del(vnode);
                }
        }

        memset(&algo->csts, 0, sizeof(struct constraints));
        algo->path = NULL;
        algo->pdst = NULL;
}

void cspf_del(struct cspf *algo)
{
        if (!algo)
                return;

        /* Empty Priority Queue and Processed Paths */
        cspf_clean(algo);

        /* Then, finalize Priority Queue, Processed Path and Visited RB-Trees */
        pqueue_fini(&algo->pqueue);
        processed_fini(&algo->processed);
        visited_fini(&algo->visited);

        XFREE(MTYPE_PCA, algo);
        algo = NULL;
}

/**
 * Prune an Edge if constraints are not met, by testing the Edge Attributes
 * against the given constraints and the cumulative cost of the given
 * constrained path.
 *
 * @param path  On-going Computed Path with cumulative cost constraints
 * @param edge  Edge to be validated against the Constraints
 * @param csts  Constraints for this path
 *
 * @return      True if the Edge should be pruned, false if the Edge is valid
 */
static bool prune_edge(const struct c_path *path, const struct ls_edge *edge,
                       const struct constraints *csts)
{
        struct ls_vertex *dst;
        struct ls_attributes *attr;

        /* Check that Path, Edge and Constraints are valid */
        if (!path || !edge || !csts)
                return true;

        /* Check that the Edge has a valid destination */
        if (!edge->destination)
                return true;
        dst = edge->destination;

        /* Check that the Edge has valid attributes */
        if (!edge->attributes)
                return true;
        attr = edge->attributes;

        /* Check that the Edge belongs to the requested Address Family and type */
        if (csts->family == AF_INET) {
                if (IPV4_NET0(attr->standard.local.s_addr))
                        return true;
                if (csts->type == SR_TE)
                        if (!CHECK_FLAG(attr->flags, LS_ATTR_ADJ_SID) ||
                            !CHECK_FLAG(dst->node->flags, LS_NODE_SR))
                                return true;
        }
        if (csts->family == AF_INET6) {
                if (IN6_IS_ADDR_UNSPECIFIED(&attr->standard.local6))
                        return true;
                if (csts->type == SR_TE)
                        if (!CHECK_FLAG(attr->flags, LS_ATTR_ADJ_SID6) ||
                            !CHECK_FLAG(dst->node->flags, LS_NODE_SR))
                                return true;
        }

        /*
         * Check that the total cost, up to this edge, respects the initial
         * constraints
         */
        switch (csts->ctype) {
        case CSPF_METRIC:
                if (!CHECK_FLAG(attr->flags, LS_ATTR_METRIC))
                        return true;
                if ((attr->metric + path->weight) > csts->cost)
                        return true;
                break;

        case CSPF_TE_METRIC:
                if (!CHECK_FLAG(attr->flags, LS_ATTR_TE_METRIC))
                        return true;
                if ((attr->standard.te_metric + path->weight) > csts->cost)
                        return true;
                break;

        case CSPF_DELAY:
                if (!CHECK_FLAG(attr->flags, LS_ATTR_DELAY))
                        return true;
                if ((attr->extended.delay + path->weight) > csts->cost)
                        return true;
                break;
        }

        /* If specified, check that the Edge meets the Bandwidth constraint */
        if (csts->bw > 0.0) {
                if (attr->standard.max_bw < csts->bw ||
                    attr->standard.max_rsv_bw < csts->bw ||
                    attr->standard.unrsv_bw[csts->cos] < csts->bw)
                        return true;
        }

        /* All is fine. We can consider this Edge valid, so not to be pruned */
        return false;
}

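/*
 * Worked example of the cumulative cost check above (hypothetical numbers):
 * with ctype = CSPF_DELAY and csts->cost = 50000, an edge whose delay
 * attribute is 20000, reached through a path whose weight is already 40000,
 * is pruned because 20000 + 40000 = 60000 exceeds the 50000 bound, while the
 * same edge reached through a path of weight 25000 is kept (45000 <= 50000).
 */
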
/**
 * Relax constraints of the current path up to the destination vertex of the
 * provided Edge. This function progresses through the network topology by
 * validating the next vertex on the computed path. If the Vertex has not
 * already been visited, the list of edges of the current path is augmented
 * with this edge when the new cost is lower than the prior path up to this
 * vertex. The current path is re-inserted in the Priority Queue with its new
 * cost, i.e. current cost + edge cost.
 *
 * @param algo  CSPF structure
 * @param edge  Next Edge to be added to the current computed path
 *
 * @return      True if the current path reaches the destination, false
 *              otherwise
 */
static bool relax_constraints(struct cspf *algo, struct ls_edge *edge)
{

        struct c_path pkey = {};
        struct c_path *next_path;
        struct v_node vnode = {};
        uint32_t total_cost = MAX_COST;

        /* Verify that we have a current computed path */
        if (!algo->path)
                return false;

        /* Verify that we have not visited the next Vertex, to avoid loops */
        vnode.key = edge->destination->key;
        if (visited_member(&algo->visited, &vnode)) {
                return false;
        }

        /*
         * Get the next Computed Path from the next vertex key
         * or create a new one if it has not been computed yet.
         */
        pkey.dst = edge->destination->key;
        next_path = processed_find(&algo->processed, &pkey);
        if (!next_path) {
                next_path = cpath_new(pkey.dst);
                processed_add(&algo->processed, next_path);
        }

        /*
         * Add or update the Computed Path in the Priority Queue if the total
         * cost is lower than the cost associated to this next Vertex. This
         * could occur if we process a Vertex that has not yet been visited in
         * the Graph, or if we found a shorter path up to this Vertex.
         */
        switch (algo->csts.ctype) {
        case CSPF_METRIC:
                total_cost = edge->attributes->metric + algo->path->weight;
                break;
        case CSPF_TE_METRIC:
                total_cost = edge->attributes->standard.te_metric +
                             algo->path->weight;
                break;
        case CSPF_DELAY:
                total_cost =
                        edge->attributes->extended.delay + algo->path->weight;
                break;
        default:
                break;
        }
        if (total_cost < next_path->weight) {
                /*
                 * It is not possible to directly update the q_path in the
                 * Priority Queue. Indeed, if we modify the path weight, the
                 * Priority Queue must be re-ordered. So, we first need to
                 * remove the q_path if it is present in the Priority Queue,
                 * then update the Path, in particular the Weight, and finally
                 * (re-)insert it in the Priority Queue.
                 */
                struct c_path *path;
                frr_each_safe (pqueue, &algo->pqueue, path) {
                        if (path->dst == pkey.dst) {
                                pqueue_del(&algo->pqueue, path);
                                break;
                        }
                }
                next_path->weight = total_cost;
                cpath_replace(next_path, algo->path);
                listnode_add(next_path->edges, edge);
                pqueue_add(&algo->pqueue, next_path);
        }

        /* Return True if we reached the destination */
        return (next_path->dst == algo->pdst->dst);
}

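/*
 * Worked example of the relaxation above (hypothetical numbers): if the
 * current path weight is 10 and the edge metric is 5, total_cost is 15.
 * When the path previously recorded towards that vertex weighs MAX_COST
 * (never reached) or 20, the candidate is re-queued with weight 15 and its
 * edge list becomes the current path's edges plus this edge; otherwise the
 * existing, cheaper candidate is left untouched.
 */
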
struct c_path *compute_p2p_path(struct cspf *algo, struct ls_ted *ted)
{
        struct listnode *node;
        struct ls_vertex *vertex;
        struct ls_edge *edge;
        struct c_path *optim_path;
        struct v_node *vnode;
        uint32_t cur_cost;

        optim_path = cpath_new(0xFFFFFFFFFFFFFFFF);
        optim_path->status = FAILED;

        /* Check that everything is correctly initialized */
        if (!algo)
                return optim_path;

        if (!algo->csts.ctype)
                return optim_path;

        if (!algo->pdst) {
                optim_path->status = NO_DESTINATION;
                return optim_path;
        }

        if (!algo->path) {
                optim_path->status = NO_SOURCE;
                return optim_path;
        }

        if (algo->pdst->dst == algo->path->dst) {
                optim_path->status = SAME_SRC_DST;
                return optim_path;
        }

        optim_path->dst = algo->pdst->dst;
        optim_path->status = IN_PROGRESS;

        /*
         * Process all Connected Vertices until the priority queue becomes
         * empty. Connected Vertices are added into the priority queue when
         * processing the next Connected Vertex: see relax_constraints()
         */
        cur_cost = MAX_COST;
        while (pqueue_count(&algo->pqueue) != 0) {
                /* Get the shortest current Path from the Priority Queue */
                algo->path = pqueue_pop(&algo->pqueue);

                /* Add destination Vertex of this path to the visited RB Tree */
                vertex = ls_find_vertex_by_key(ted, algo->path->dst);
                if (!vertex)
                        continue;
                vnode = vnode_new(vertex);
                visited_add(&algo->visited, vnode);

                /* Process all outgoing links from this Vertex */
                for (ALL_LIST_ELEMENTS_RO(vertex->outgoing_edges, node, edge)) {
                        /*
                         * Skip Connected Edges that must be pruned, i.e.
                         * Edges that do not satisfy the given constraints,
                         * in particular the Bandwidth, TE Metric and Delay.
                         */
                        if (prune_edge(algo->path, edge, &algo->csts))
                                continue;

                        /*
                         * Relax constraints and check if we got a shorter
                         * candidate path
                         */
                        if (relax_constraints(algo, edge) &&
                            algo->pdst->weight < cur_cost) {
                                cur_cost = algo->pdst->weight;
                                cpath_copy(optim_path, algo->pdst);
                                optim_path->status = SUCCESS;
                        }
                }
        }

        /*
         * The priority queue is empty => all the possible (vertex, path)
         * elements have been explored. The optim_path contains the optimal
         * path if it exists. Otherwise an empty path with status FAILED is
         * returned.
         */
        if (optim_path->status == IN_PROGRESS ||
            listcount(optim_path->edges) == 0)
                optim_path->status = FAILED;
        cspf_clean(algo);

        return optim_path;
}