/* pathd/path_pcep_pcc.c — imported from the FRRouting (mirror_frr)
 * repository. */
1 /*
2 * Copyright (C) 2020 NetDEF, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; see the file COPYING; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 /* TODOS AND KNOWN ISSUES:
20 - Delete mapping from NB keys to PLSPID when an LSP is deleted either
21 by the PCE or by NB.
22 - Revert the hacks to work around ODL requiring a report with
23 operational status DOWN when an LSP is activated.
24 - Enforce only the PCE a policy has been delegated to can update it.
25 - If the router-id is used because the PCC IP is not specified
26 (either IPv4 or IPv6), the connection to the PCE is not reset
27 when the router-id changes.
28 */
29
30 #include <zebra.h>
31
32 #include "log.h"
33 #include "command.h"
34 #include "libfrr.h"
35 #include "printfrr.h"
36 #include "version.h"
37 #include "northbound.h"
38 #include "frr_pthread.h"
39 #include "jhash.h"
40
41 #include "pathd/pathd.h"
42 #include "pathd/path_zebra.h"
43 #include "pathd/path_errors.h"
44 #include "pathd/path_pcep_memory.h"
45 #include "pathd/path_pcep.h"
46 #include "pathd/path_pcep_controller.h"
47 #include "pathd/path_pcep_lib.h"
48 #include "pathd/path_pcep_config.h"
49 #include "pathd/path_pcep_debug.h"
50
51
/* The number of times we will skip connecting if we are missing the PCC
 * address for an inet family different from the selected transport one */
54 #define OTHER_FAMILY_MAX_RETRIES 4
55 #define MAX_ERROR_MSG_SIZE 256
56 #define MAX_COMPREQ_TRIES 3
57
58
59 /* PCEP Event Handler */
60 static void handle_pcep_open(struct ctrl_state *ctrl_state,
61 struct pcc_state *pcc_state,
62 struct pcep_message *msg);
63 static void handle_pcep_message(struct ctrl_state *ctrl_state,
64 struct pcc_state *pcc_state,
65 struct pcep_message *msg);
66 static void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
67 struct pcc_state *pcc_state,
68 struct pcep_message *msg);
69 static void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
70 struct pcc_state *pcc_state,
71 struct pcep_message *msg);
72 static void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
73 struct pcc_state *pcc_state,
74 struct pcep_message *msg);
75
76 /* Internal Functions */
77 static const char *ipaddr_type_name(struct ipaddr *addr);
78 static bool filter_path(struct pcc_state *pcc_state, struct path *path);
79 static void select_pcc_addresses(struct pcc_state *pcc_state);
80 static void select_transport_address(struct pcc_state *pcc_state);
81 static void update_tag(struct pcc_state *pcc_state);
82 static void update_originator(struct pcc_state *pcc_state);
83 static void schedule_reconnect(struct ctrl_state *ctrl_state,
84 struct pcc_state *pcc_state);
85 static void schedule_session_timeout(struct ctrl_state *ctrl_state,
86 struct pcc_state *pcc_state);
87 static void cancel_session_timeout(struct ctrl_state *ctrl_state,
88 struct pcc_state *pcc_state);
89 static void send_pcep_message(struct pcc_state *pcc_state,
90 struct pcep_message *msg);
91 static void send_pcep_error(struct pcc_state *pcc_state,
92 enum pcep_error_type error_type,
93 enum pcep_error_value error_value);
94 static void send_report(struct pcc_state *pcc_state, struct path *path);
95 static void send_comp_request(struct ctrl_state *ctrl_state,
96 struct pcc_state *pcc_state,
97 struct req_entry *req);
98 static void cancel_comp_requests(struct ctrl_state *ctrl_state,
99 struct pcc_state *pcc_state);
100 static void cancel_comp_request(struct ctrl_state *ctrl_state,
101 struct pcc_state *pcc_state,
102 struct req_entry *req);
103 static void specialize_outgoing_path(struct pcc_state *pcc_state,
104 struct path *path);
105 static void specialize_incoming_path(struct pcc_state *pcc_state,
106 struct path *path);
107 static bool validate_incoming_path(struct pcc_state *pcc_state,
108 struct path *path, char *errbuff,
109 size_t buffsize);
110 static void set_pcc_address(struct pcc_state *pcc_state,
111 struct lsp_nb_key *nbkey, struct ipaddr *addr);
112 static int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs);
113 static int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs);
114 static int get_previous_best_pce(struct pcc_state **pcc);
115 static int get_best_pce(struct pcc_state **pcc);
116 static int get_pce_count_connected(struct pcc_state **pcc);
117 static bool update_best_pce(struct pcc_state **pcc, int best);
118
119 /* Data Structure Helper Functions */
120 static void lookup_plspid(struct pcc_state *pcc_state, struct path *path);
121 static void lookup_nbkey(struct pcc_state *pcc_state, struct path *path);
122 static void free_req_entry(struct req_entry *req);
123 static struct req_entry *push_new_req(struct pcc_state *pcc_state,
124 struct path *path);
125 static void repush_req(struct pcc_state *pcc_state, struct req_entry *req);
126 static struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid);
127 static bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path);
128 static void remove_reqid_mapping(struct pcc_state *pcc_state,
129 struct path *path);
130 static uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path);
131 static bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path);
132
133 /* Data Structure Callbacks */
134 static int plspid_map_cmp(const struct plspid_map_data *a,
135 const struct plspid_map_data *b);
136 static uint32_t plspid_map_hash(const struct plspid_map_data *e);
137 static int nbkey_map_cmp(const struct nbkey_map_data *a,
138 const struct nbkey_map_data *b);
139 static uint32_t nbkey_map_hash(const struct nbkey_map_data *e);
140 static int req_map_cmp(const struct req_map_data *a,
141 const struct req_map_data *b);
142 static uint32_t req_map_hash(const struct req_map_data *e);
143
144 /* Data Structure Declarations */
145 DECLARE_HASH(plspid_map, struct plspid_map_data, mi, plspid_map_cmp,
146 plspid_map_hash)
147 DECLARE_HASH(nbkey_map, struct nbkey_map_data, mi, nbkey_map_cmp,
148 nbkey_map_hash)
149 DECLARE_HASH(req_map, struct req_map_data, mi, req_map_cmp, req_map_hash)
150
151 static inline int req_entry_compare(const struct req_entry *a,
152 const struct req_entry *b)
153 {
154 return a->path->req_id - b->path->req_id;
155 }
156 RB_GENERATE(req_entry_head, req_entry, entry, req_entry_compare)
157
158
159 /* ------------ API Functions ------------ */
160
161 struct pcc_state *pcep_pcc_initialize(struct ctrl_state *ctrl_state, int index)
162 {
163 struct pcc_state *pcc_state = XCALLOC(MTYPE_PCEP, sizeof(*pcc_state));
164
165 pcc_state->id = index;
166 pcc_state->status = PCEP_PCC_DISCONNECTED;
167 pcc_state->next_reqid = 1;
168 pcc_state->next_plspid = 1;
169
170 RB_INIT(req_entry_head, &pcc_state->requests);
171
172 update_tag(pcc_state);
173 update_originator(pcc_state);
174
175 PCEP_DEBUG("%s PCC initialized", pcc_state->tag);
176
177 return pcc_state;
178 }
179
180 void pcep_pcc_finalize(struct ctrl_state *ctrl_state,
181 struct pcc_state *pcc_state)
182 {
183 PCEP_DEBUG("%s PCC finalizing...", pcc_state->tag);
184
185 pcep_pcc_disable(ctrl_state, pcc_state);
186
187 if (pcc_state->pcc_opts != NULL) {
188 XFREE(MTYPE_PCEP, pcc_state->pcc_opts);
189 pcc_state->pcc_opts = NULL;
190 }
191 if (pcc_state->pce_opts != NULL) {
192 XFREE(MTYPE_PCEP, pcc_state->pce_opts);
193 pcc_state->pce_opts = NULL;
194 }
195 if (pcc_state->originator != NULL) {
196 XFREE(MTYPE_PCEP, pcc_state->originator);
197 pcc_state->originator = NULL;
198 }
199
200 if (pcc_state->t_reconnect != NULL) {
201 thread_cancel(&pcc_state->t_reconnect);
202 pcc_state->t_reconnect = NULL;
203 }
204
205 if (pcc_state->t_update_best != NULL) {
206 thread_cancel(&pcc_state->t_update_best);
207 pcc_state->t_update_best = NULL;
208 }
209
210 if (pcc_state->t_session_timeout != NULL) {
211 thread_cancel(&pcc_state->t_session_timeout);
212 pcc_state->t_session_timeout = NULL;
213 }
214
215 XFREE(MTYPE_PCEP, pcc_state);
216 }
217
218 int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs)
219 {
220 int retval;
221
222 if (lhs == NULL) {
223 return 1;
224 }
225
226 if (rhs == NULL) {
227 return -1;
228 }
229
230 retval = lhs->port - rhs->port;
231 if (retval != 0) {
232 return retval;
233 }
234
235 retval = lhs->msd - rhs->msd;
236 if (retval != 0) {
237 return retval;
238 }
239
240 if (IS_IPADDR_V4(&lhs->addr)) {
241 retval = memcmp(&lhs->addr.ipaddr_v4, &rhs->addr.ipaddr_v4,
242 sizeof(lhs->addr.ipaddr_v4));
243 if (retval != 0) {
244 return retval;
245 }
246 } else if (IS_IPADDR_V6(&lhs->addr)) {
247 retval = memcmp(&lhs->addr.ipaddr_v6, &rhs->addr.ipaddr_v6,
248 sizeof(lhs->addr.ipaddr_v6));
249 if (retval != 0) {
250 return retval;
251 }
252 }
253
254 return 0;
255 }
256
257 int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs)
258 {
259 if (lhs == NULL) {
260 return 1;
261 }
262
263 if (rhs == NULL) {
264 return -1;
265 }
266
267 int retval = lhs->port - rhs->port;
268 if (retval != 0) {
269 return retval;
270 }
271
272 retval = strcmp(lhs->pce_name, rhs->pce_name);
273 if (retval != 0) {
274 return retval;
275 }
276
277 retval = lhs->precedence - rhs->precedence;
278 if (retval != 0) {
279 return retval;
280 }
281
282 retval = memcmp(&lhs->addr, &rhs->addr, sizeof(lhs->addr));
283 if (retval != 0) {
284 return retval;
285 }
286
287 return 0;
288 }
289
290 int pcep_pcc_update(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state,
291 struct pcc_opts *pcc_opts, struct pce_opts *pce_opts)
292 {
293 int ret = 0;
294
295 // If the options did not change, then there is nothing to do
296 if ((compare_pce_opts(pce_opts, pcc_state->pce_opts) == 0)
297 && (compare_pcc_opts(pcc_opts, pcc_state->pcc_opts) == 0)) {
298 return ret;
299 }
300
301 if ((ret = pcep_pcc_disable(ctrl_state, pcc_state))) {
302 XFREE(MTYPE_PCEP, pcc_opts);
303 XFREE(MTYPE_PCEP, pce_opts);
304 return ret;
305 }
306
307 if (pcc_state->pcc_opts != NULL) {
308 XFREE(MTYPE_PCEP, pcc_state->pcc_opts);
309 }
310 if (pcc_state->pce_opts != NULL) {
311 XFREE(MTYPE_PCEP, pcc_state->pce_opts);
312 }
313
314 pcc_state->pcc_opts = pcc_opts;
315 pcc_state->pce_opts = pce_opts;
316
317 if (IS_IPADDR_V4(&pcc_opts->addr)) {
318 pcc_state->pcc_addr_v4 = pcc_opts->addr.ipaddr_v4;
319 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
320 } else {
321 UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
322 }
323
324 if (IS_IPADDR_V6(&pcc_opts->addr)) {
325 memcpy(&pcc_state->pcc_addr_v6, &pcc_opts->addr.ipaddr_v6,
326 sizeof(struct in6_addr));
327 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
328 } else {
329 UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
330 }
331
332 update_tag(pcc_state);
333 update_originator(pcc_state);
334
335 return pcep_pcc_enable(ctrl_state, pcc_state);
336 }
337
338 void pcep_pcc_reconnect(struct ctrl_state *ctrl_state,
339 struct pcc_state *pcc_state)
340 {
341 if (pcc_state->status == PCEP_PCC_DISCONNECTED)
342 pcep_pcc_enable(ctrl_state, pcc_state);
343 }
344
/*
 * Start connecting this PCC to its configured PCE.
 *
 * Preconditions: the PCC must be fully disconnected (asserted).
 * Selects the transport source address, warns when an address for the
 * other inet family is missing (retrying up to OTHER_FAMILY_MAX_RETRIES
 * before giving up on that family), and opens the pceplib session.
 *
 * Always returns 0: every failure path schedules a reconnection
 * attempt instead of reporting an error to the caller.
 */
int pcep_pcc_enable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
{
	char pcc_buff[40];
	char pce_buff[40];

	assert(pcc_state->status == PCEP_PCC_DISCONNECTED);
	assert(pcc_state->sess == NULL);

	/* A pending reconnect timer would re-enter this function. */
	if (pcc_state->t_reconnect != NULL) {
		thread_cancel(&pcc_state->t_reconnect);
		pcc_state->t_reconnect = NULL;
	}

	select_transport_address(pcc_state);

	/* Even though we are connecting using IPv6, we want to have an IPv4
	 * address so we can handle candidate paths with IPv4 endpoints */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
		if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "skipping connection to PCE %s:%d due to "
				  "missing PCC IPv4 address",
				  ipaddr2str(&pcc_state->pce_opts->addr,
					     pce_buff, sizeof(pce_buff)),
				  pcc_state->pce_opts->port);
			schedule_reconnect(ctrl_state, pcc_state);
			return 0;
		} else {
			/* Retries exhausted: proceed without IPv4. */
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "missing IPv4 PCC address, IPv4 candidate "
				  "paths will be ignored");
		}
	}

	/* Even though we are connecting using IPv4, we want to have an IPv6
	 * address so we can handle candidate paths with IPv6 endpoints */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
		if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "skipping connection to PCE %s:%d due to "
				  "missing PCC IPv6 address",
				  ipaddr2str(&pcc_state->pce_opts->addr,
					     pce_buff, sizeof(pce_buff)),
				  pcc_state->pce_opts->port);
			schedule_reconnect(ctrl_state, pcc_state);
			return 0;
		} else {
			/* Retries exhausted: proceed without IPv6. */
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "missing IPv6 PCC address, IPv6 candidate "
				  "paths will be ignored");
		}
	}

	/* Even if the maximum retries to try to have all the family addresses
	 * have been spent, we still need the one for the transport family */
	if (pcc_state->pcc_addr_tr.ipa_type == IPADDR_NONE) {
		flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
			  "skipping connection to PCE %s:%d due to missing "
			  "PCC address",
			  ipaddr2str(&pcc_state->pce_opts->addr, pce_buff,
				     sizeof(pce_buff)),
			  pcc_state->pce_opts->port);
		schedule_reconnect(ctrl_state, pcc_state);
		return 0;
	}

	PCEP_DEBUG("%s PCC connecting", pcc_state->tag);
	pcc_state->sess = pcep_lib_connect(
		&pcc_state->pcc_addr_tr, pcc_state->pcc_opts->port,
		&pcc_state->pce_opts->addr, pcc_state->pce_opts->port,
		pcc_state->pcc_opts->msd, &pcc_state->pce_opts->config_opts);

	if (pcc_state->sess == NULL) {
		flog_warn(EC_PATH_PCEP_LIB_CONNECT,
			  "failed to connect to PCE %s:%d from %s:%d",
			  ipaddr2str(&pcc_state->pce_opts->addr, pce_buff,
				     sizeof(pce_buff)),
			  pcc_state->pce_opts->port,
			  ipaddr2str(&pcc_state->pcc_addr_tr, pcc_buff,
				     sizeof(pcc_buff)),
			  pcc_state->pcc_opts->port);
		schedule_reconnect(ctrl_state, pcc_state);
		return 0;
	}

	// In case some best pce alternative were waiting to activate
	if (pcc_state->t_update_best != NULL) {
		thread_cancel(&pcc_state->t_update_best);
		pcc_state->t_update_best = NULL;
	}

	pcc_state->status = PCEP_PCC_CONNECTING;

	return 0;
}
440
441 int pcep_pcc_disable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
442 {
443 switch (pcc_state->status) {
444 case PCEP_PCC_DISCONNECTED:
445 return 0;
446 case PCEP_PCC_CONNECTING:
447 case PCEP_PCC_SYNCHRONIZING:
448 case PCEP_PCC_OPERATING:
449 PCEP_DEBUG("%s Disconnecting PCC...", pcc_state->tag);
450 cancel_comp_requests(ctrl_state, pcc_state);
451 pcep_lib_disconnect(pcc_state->sess);
452 /* No need to remove if any PCEs is connected */
453 if (get_pce_count_connected(ctrl_state->pcc) == 0) {
454 pcep_thread_remove_candidate_path_segments(ctrl_state,
455 pcc_state);
456 }
457 pcc_state->sess = NULL;
458 pcc_state->status = PCEP_PCC_DISCONNECTED;
459 return 0;
460 default:
461 return 1;
462 }
463 }
464
465 void pcep_pcc_sync_path(struct ctrl_state *ctrl_state,
466 struct pcc_state *pcc_state, struct path *path)
467 {
468 if (pcc_state->status == PCEP_PCC_SYNCHRONIZING) {
469 path->is_synching = true;
470 } else if (pcc_state->status == PCEP_PCC_OPERATING)
471 path->is_synching = false;
472 else
473 return;
474
475 path->go_active = true;
476
477 /* Accumulate the dynamic paths without any LSP so computation
478 * requests can be performed after synchronization */
479 if ((path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
480 && (path->first_hop == NULL)
481 && !has_pending_req_for(pcc_state, path)) {
482 PCEP_DEBUG("%s Scheduling computation request for path %s",
483 pcc_state->tag, path->name);
484 push_new_req(pcc_state, path);
485 return;
486 }
487
488 /* Synchronize the path if the PCE supports LSP updates and the
489 * endpoint address familly is supported */
490 if (pcc_state->caps.is_stateful) {
491 if (filter_path(pcc_state, path)) {
492 PCEP_DEBUG("%s Synchronizing path %s", pcc_state->tag,
493 path->name);
494 send_report(pcc_state, path);
495 } else {
496 PCEP_DEBUG(
497 "%s Skipping %s candidate path %s "
498 "synchronization",
499 pcc_state->tag,
500 ipaddr_type_name(&path->nbkey.endpoint),
501 path->name);
502 }
503 }
504 }
505
/*
 * Called by the controller when the initial LSP synchronization with
 * the PCE is finished.  Sends a final empty report (stateful PCEs
 * only), marks the session as operating and flushes the computation
 * requests accumulated while synchronizing.
 */
void pcep_pcc_sync_done(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	struct req_entry *req;

	if (pcc_state->status != PCEP_PCC_SYNCHRONIZING
	    && pcc_state->status != PCEP_PCC_OPERATING)
		return;

	if (pcc_state->caps.is_stateful
	    && pcc_state->status == PCEP_PCC_SYNCHRONIZING) {
		/* NOTE(review): presumably the end-of-synchronization
		 * report (PLSP-ID 0, no LSP data) per RFC 8231 — confirm
		 * against send_report()/pceplib. */
		struct path *path = pcep_new_path();
		*path = (struct path){.name = NULL,
				      .srp_id = 0,
				      .plsp_id = 0,
				      .status = PCEP_LSP_OPERATIONAL_DOWN,
				      .do_remove = false,
				      .go_active = false,
				      .was_created = false,
				      .was_removed = false,
				      .is_synching = false,
				      .is_delegated = false,
				      .first_hop = NULL,
				      .first_metric = NULL};
		send_report(pcc_state, path);
		/* send_report() does not take ownership of the path. */
		pcep_free_path(path);
	}

	pcc_state->synchronized = true;
	pcc_state->status = PCEP_PCC_OPERATING;

	PCEP_DEBUG("%s Synchronization done", pcc_state->tag);

	/* Start the computation request accumulated during synchronization */
	RB_FOREACH (req, req_entry_head, &pcc_state->requests) {
		send_comp_request(ctrl_state, pcc_state, req);
	}
}
544
545 void pcep_pcc_send_report(struct ctrl_state *ctrl_state,
546 struct pcc_state *pcc_state, struct path *path)
547 {
548 if (pcc_state->status != PCEP_PCC_OPERATING)
549 return;
550
551 if (pcc_state->caps.is_stateful) {
552 PCEP_DEBUG("%s Send report for candidate path %s",
553 pcc_state->tag, path->name);
554 send_report(pcc_state, path);
555 }
556 }
557
558 /* ------------ Timeout handler ------------ */
559
560 void pcep_pcc_timeout_handler(struct ctrl_state *ctrl_state,
561 struct pcc_state *pcc_state,
562 enum pcep_ctrl_timer_type type, void *param)
563 {
564 struct req_entry *req;
565
566 switch (type) {
567 case TO_COMPUTATION_REQUEST:
568 assert(param != NULL);
569 req = (struct req_entry *)param;
570 pop_req(pcc_state, req->path->req_id);
571 flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT,
572 "Computation request %d timeout", req->path->req_id);
573 cancel_comp_request(ctrl_state, pcc_state, req);
574 if (req->retry_count++ < MAX_COMPREQ_TRIES) {
575 repush_req(pcc_state, req);
576 send_comp_request(ctrl_state, pcc_state, req);
577 return;
578 }
579 if (pcc_state->caps.is_stateful) {
580 struct path *path;
581 PCEP_DEBUG(
582 "%s Delegating undefined dynamic path %s to PCE %s",
583 pcc_state->tag, req->path->name,
584 pcc_state->originator);
585 path = pcep_copy_path(req->path);
586 path->is_delegated = true;
587 send_report(pcc_state, path);
588 free_req_entry(req);
589 }
590 break;
591 default:
592 break;
593 }
594 }
595
596
597 /* ------------ Pathd event handler ------------ */
598
/*
 * Translate a pathd candidate-path event (create/update/remove) into
 * PCEP actions: a computation request for new dynamic paths without a
 * hop list, or a report for stateful PCEs.  Events are ignored unless
 * the session is operating.
 */
void pcep_pcc_pathd_event_handler(struct ctrl_state *ctrl_state,
				  struct pcc_state *pcc_state,
				  enum pcep_pathd_event_type type,
				  struct path *path)
{
	struct req_entry *req;

	if (pcc_state->status != PCEP_PCC_OPERATING)
		return;

	/* Skipping candidate path with endpoint that do not match the
	 * configured or deduced PCC IP version */
	if (!filter_path(pcc_state, path)) {
		PCEP_DEBUG("%s Skipping %s candidate path %s event",
			   pcc_state->tag,
			   ipaddr_type_name(&path->nbkey.endpoint), path->name);
		return;
	}

	switch (type) {
	case PCEP_PATH_CREATED:
		/* Avoid duplicate computation requests for a path that
		 * already has one in flight. */
		if (has_pending_req_for(pcc_state, path)) {
			PCEP_DEBUG(
				"%s Candidate path %s created, computation request already sent",
				pcc_state->tag, path->name);
			return;
		}
		PCEP_DEBUG("%s Candidate path %s created", pcc_state->tag,
			   path->name);
		/* Dynamic paths with no hop list need the PCE to
		 * compute them; everything else is just reported. */
		if ((path->first_hop == NULL)
		    && (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)) {
			req = push_new_req(pcc_state, path);
			send_comp_request(ctrl_state, pcc_state, req);
		} else if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_UPDATED:
		PCEP_DEBUG("%s Candidate path %s updated", pcc_state->tag,
			   path->name);
		if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_REMOVED:
		PCEP_DEBUG("%s Candidate path %s removed", pcc_state->tag,
			   path->name);
		path->was_removed = true;
		if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	default:
		flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR,
			  "Unexpected pathd event received by pcc %s: %u",
			  pcc_state->tag, type);
		return;
	}
}
655
656
657 /* ------------ PCEP event handler ------------ */
658
/*
 * Dispatch an event coming from pceplib for this PCC session:
 * connection establishment, OPEN negotiation outcomes, session loss
 * and incoming PCEP messages.
 */
void pcep_pcc_pcep_event_handler(struct ctrl_state *ctrl_state,
				 struct pcc_state *pcc_state, pcep_event *event)
{
	PCEP_DEBUG("%s Received PCEP event: %s", pcc_state->tag,
		   pcep_event_type_name(event->event_type));
	switch (event->event_type) {
	case PCC_CONNECTED_TO_PCE:
		/* Session established: move to synchronizing and kick
		 * off the LSP state synchronization. */
		assert(PCEP_PCC_CONNECTING == pcc_state->status);
		PCEP_DEBUG("%s Connection established", pcc_state->tag);
		pcc_state->status = PCEP_PCC_SYNCHRONIZING;
		pcc_state->retry_count = 0;
		pcc_state->synchronized = false;
		PCEP_DEBUG("%s Starting PCE synchronization", pcc_state->tag);
		cancel_session_timeout(ctrl_state, pcc_state);
		pcep_pcc_calculate_best_pce(ctrl_state->pcc);
		pcep_thread_start_sync(ctrl_state, pcc_state->id);
		break;
	case PCC_SENT_INVALID_OPEN:
		/* Our OPEN was rejected: adopt the timer values the PCE
		 * negotiated so the next attempt can succeed. */
		PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state->tag);
		PCEP_DEBUG(
			"%s Reconciling values: keep alive (%d) dead timer (%d) seconds ",
			pcc_state->tag,
			pcc_state->sess->pcc_config
				.keep_alive_pce_negotiated_timer_seconds,
			pcc_state->sess->pcc_config
				.dead_timer_pce_negotiated_seconds);
		pcc_state->pce_opts->config_opts.keep_alive_seconds =
			pcc_state->sess->pcc_config
				.keep_alive_pce_negotiated_timer_seconds;
		pcc_state->pce_opts->config_opts.dead_timer_seconds =
			pcc_state->sess->pcc_config
				.dead_timer_pce_negotiated_seconds;
		break;

	case PCC_RCVD_INVALID_OPEN:
		PCEP_DEBUG("%s Received invalid OPEN message", pcc_state->tag);
		PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state->tag,
				format_pcep_message(event->message));
		break;
	case PCE_DEAD_TIMER_EXPIRED:
	case PCE_CLOSED_SOCKET:
	case PCE_SENT_PCEP_CLOSE:
	case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED:
	case PCC_PCEP_SESSION_CLOSED:
	case PCC_RCVD_MAX_INVALID_MSGS:
	case PCC_RCVD_MAX_UNKOWN_MSGS:
		/* Session lost in any way: disconnect, retry, and arm
		 * the session timeout. */
		pcep_pcc_disable(ctrl_state, pcc_state);
		schedule_reconnect(ctrl_state, pcc_state);
		schedule_session_timeout(ctrl_state, pcc_state);
		break;
	case MESSAGE_RECEIVED:
		PCEP_DEBUG_PCEP("%s Received PCEP message: %s", pcc_state->tag,
				format_pcep_message(event->message));
		/* While connecting only the OPEN message is relevant. */
		if (pcc_state->status == PCEP_PCC_CONNECTING) {
			if (event->message->msg_header->type == PCEP_TYPE_OPEN)
				handle_pcep_open(ctrl_state, pcc_state,
						 event->message);
			break;
		}
		assert(pcc_state->status == PCEP_PCC_SYNCHRONIZING
		       || pcc_state->status == PCEP_PCC_OPERATING);
		handle_pcep_message(ctrl_state, pcc_state, event->message);
		break;
	default:
		flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT,
			  "Unexpected event from pceplib: %s",
			  format_pcep_event(event));
		break;
	}
}
729
730
731 /*------------------ Multi-PCE --------------------- */
732
733 /* Internal util function, returns true if sync is necessary, false otherwise */
734 bool update_best_pce(struct pcc_state **pcc, int best)
735 {
736 PCEP_DEBUG(" recalculating pce precedence ");
737 if (best) {
738 struct pcc_state *best_pcc_state =
739 pcep_pcc_get_pcc_by_id(pcc, best);
740 if (best_pcc_state->previous_best != best_pcc_state->is_best) {
741 PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)",
742 best_pcc_state->tag, best_pcc_state->id,
743 best_pcc_state->previous_best);
744 return true;
745 } else {
746 PCEP_DEBUG(
747 " %s No Resynch best (%i) previous best (%i)",
748 best_pcc_state->tag, best_pcc_state->id,
749 best_pcc_state->previous_best);
750 }
751 } else {
752 PCEP_DEBUG(" No best pce available, all pce seem disconnected");
753 }
754
755 return false;
756 }
757
758 int get_best_pce(struct pcc_state **pcc)
759 {
760 for (int i = 0; i < MAX_PCC; i++) {
761 if (pcc[i] && pcc[i]->pce_opts) {
762 if (pcc[i]->is_best == true) {
763 return pcc[i]->id;
764 }
765 }
766 }
767 return 0;
768 }
769
770 int get_pce_count_connected(struct pcc_state **pcc)
771 {
772 int count = 0;
773 for (int i = 0; i < MAX_PCC; i++) {
774 if (pcc[i] && pcc[i]->pce_opts
775 && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
776 count++;
777 }
778 }
779 return count;
780 }
781
782 int get_previous_best_pce(struct pcc_state **pcc)
783 {
784 int previous_best_pce = -1;
785
786 for (int i = 0; i < MAX_PCC; i++) {
787 if (pcc[i] && pcc[i]->pce_opts && pcc[i]->previous_best == true
788 && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
789 previous_best_pce = i;
790 break;
791 }
792 }
793 return previous_best_pce != -1 ? pcc[previous_best_pce]->id : 0;
794 }
795
796 /* Called by path_pcep_controller EV_REMOVE_PCC
797 * Event handler when a PCC is removed. */
798 int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state *ctrl_state,
799 struct pcc_state **pcc)
800 {
801 int new_best_pcc_id = -1;
802 new_best_pcc_id = pcep_pcc_calculate_best_pce(pcc);
803 if (new_best_pcc_id) {
804 if (update_best_pce(ctrl_state->pcc, new_best_pcc_id) == true) {
805 pcep_thread_start_sync(ctrl_state, new_best_pcc_id);
806 }
807 }
808
809 return 0;
810 }
811
812 /* Called by path_pcep_controller EV_SYNC_PATH
813 * Event handler when a path is sync'd. */
814 int pcep_pcc_multi_pce_sync_path(struct ctrl_state *ctrl_state, int pcc_id,
815 struct pcc_state **pcc)
816 {
817 int previous_best_pcc_id = -1;
818
819 if (pcc_id == get_best_pce(pcc)) {
820 previous_best_pcc_id = get_previous_best_pce(pcc);
821 if (previous_best_pcc_id != 0) {
822 /* while adding new pce, path has to resync to the
823 * previous best. pcep_thread_start_sync() will be
824 * called by the calling function */
825 if (update_best_pce(ctrl_state->pcc,
826 previous_best_pcc_id)
827 == true) {
828 cancel_comp_requests(
829 ctrl_state,
830 pcep_pcc_get_pcc_by_id(
831 pcc, previous_best_pcc_id));
832 pcep_thread_start_sync(ctrl_state,
833 previous_best_pcc_id);
834 }
835 }
836 }
837
838 return 0;
839 }
840
841 /* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE
842 * timer expires */
843 int pcep_pcc_timer_update_best_pce(struct ctrl_state *ctrl_state, int pcc_id)
844 {
845 int ret = 0;
846 /* resync whatever was the new best */
847 int prev_best = get_best_pce(ctrl_state->pcc);
848 int best_id = pcep_pcc_calculate_best_pce(ctrl_state->pcc);
849 if (best_id && prev_best != best_id) { // Avoid Multiple call
850 struct pcc_state *pcc_state =
851 pcep_pcc_get_pcc_by_id(ctrl_state->pcc, best_id);
852 if (update_best_pce(ctrl_state->pcc, pcc_state->id) == true) {
853 pcep_thread_start_sync(ctrl_state, pcc_state->id);
854 }
855 }
856
857 return ret;
858 }
859
860 /* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
861 * Returns the best PCE id */
/* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
 * Returns the best PCE id.
 *
 * Elects the connected PCE with the lowest precedence value (ties
 * broken by address comparison), demotes the former best to
 * "previous best" when it is disconnected, and falls back to any
 * connected — or, failing that, any configured — PCE. */
int pcep_pcc_calculate_best_pce(struct pcc_state **pcc)
{
	int best_precedence = 255; // DEFAULT_PCE_PRECEDENCE;
	int best_pce = -1;
	int one_connected_pce = -1;
	int previous_best_pce = -1;
	int step_0_best = -1;
	int step_0_previous = -1;
	int pcc_count = 0;

	// Get state
	for (int i = 0; i < MAX_PCC; i++) {
		if (pcc[i] && pcc[i]->pce_opts) {
			zlog_debug(
				"multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ",
				i, pcc[i]->is_best, pcc[i]->previous_best);
			pcc_count++;

			if (pcc[i]->is_best == true) {
				step_0_best = i;
			}
			if (pcc[i]->previous_best == true) {
				step_0_previous = i;
			}
		}
	}

	/* No PCE configured at all: nothing to elect. */
	if (!pcc_count) {
		return 0;
	}

	// Calculate best
	for (int i = 0; i < MAX_PCC; i++) {
		if (pcc[i] && pcc[i]->pce_opts
		    && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
			one_connected_pce = i; // In case none better
			if (pcc[i]->pce_opts->precedence <= best_precedence) {
				if (best_pce != -1
				    && pcc[best_pce]->pce_opts->precedence
					       == pcc[i]->pce_opts
							  ->precedence) {
					if (ipaddr_cmp(
						    &pcc[i]->pce_opts->addr,
						    &pcc[best_pce]
							     ->pce_opts->addr)
					    > 0)
						// collide of precedences so
						// compare ip
						best_pce = i;
				} else {
					/* Skip the previous best so the
					 * election can move away from it. */
					if (!pcc[i]->previous_best) {
						best_precedence =
							pcc[i]->pce_opts
								->precedence;
						best_pce = i;
					}
				}
			}
		}
	}

	zlog_debug(
		"multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ",
		step_0_best, step_0_previous, one_connected_pce, best_pce);

	// Changed of state so ...
	if (step_0_best != best_pce) {
		// Calculate previous
		previous_best_pce = step_0_best;
		// Clean state
		if (step_0_best != -1) {
			pcc[step_0_best]->is_best = false;
		}
		if (step_0_previous != -1) {
			pcc[step_0_previous]->previous_best = false;
		}

		// Set previous: only remembered while it is disconnected
		if (previous_best_pce != -1
		    && pcc[previous_best_pce]->status
			       == PCEP_PCC_DISCONNECTED) {
			pcc[previous_best_pce]->previous_best = true;
			zlog_debug("multi-pce: previous best pce (%i) ",
				   previous_best_pce + 1);
		}


		// Set best
		if (best_pce != -1) {
			pcc[best_pce]->is_best = true;
			zlog_debug("multi-pce: best pce (%i) ", best_pce + 1);
		} else {
			/* No candidate won the election: fall back to any
			 * connected PCE, then to any configured one. */
			if (one_connected_pce != -1) {
				best_pce = one_connected_pce;
				pcc[one_connected_pce]->is_best = true;
				zlog_debug(
					"multi-pce: one connected best pce (default) (%i) ",
					one_connected_pce + 1);
			} else {
				for (int i = 0; i < MAX_PCC; i++) {
					if (pcc[i] && pcc[i]->pce_opts) {
						best_pce = i;
						pcc[i]->is_best = true;
						zlog_debug(
							"(disconnected) best pce (default) (%i) ",
							i + 1);
						break;
					}
				}
			}
		}
	}

	return ((best_pce == -1) ? 0 : pcc[best_pce]->id);
}
977
978 int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state **pcc,
979 struct pce_opts *pce_opts)
980 {
981 if (pcc == NULL) {
982 return 0;
983 }
984
985 for (int idx = 0; idx < MAX_PCC; idx++) {
986 if (pcc[idx]) {
987 if ((ipaddr_cmp((const struct ipaddr *)&pcc[idx]
988 ->pce_opts->addr,
989 (const struct ipaddr *)&pce_opts->addr)
990 == 0)
991 && pcc[idx]->pce_opts->port == pce_opts->port) {
992 zlog_debug("found pcc_id (%d) idx (%d)",
993 pcc[idx]->id, idx);
994 return pcc[idx]->id;
995 }
996 }
997 }
998 return 0;
999 }
1000
1001 int pcep_pcc_get_pcc_id_by_idx(struct pcc_state **pcc, int idx)
1002 {
1003 if (pcc == NULL || idx < 0) {
1004 return 0;
1005 }
1006
1007 return pcc[idx] ? pcc[idx]->id : 0;
1008 }
1009
1010 struct pcc_state *pcep_pcc_get_pcc_by_id(struct pcc_state **pcc, int id)
1011 {
1012 if (pcc == NULL || id < 0) {
1013 return NULL;
1014 }
1015
1016 for (int i = 0; i < MAX_PCC; i++) {
1017 if (pcc[i]) {
1018 if (pcc[i]->id == id) {
1019 zlog_debug("found id (%d) pcc_idx (%d)",
1020 pcc[i]->id, i);
1021 return pcc[i];
1022 }
1023 }
1024 }
1025
1026 return NULL;
1027 }
1028
1029 struct pcc_state *pcep_pcc_get_pcc_by_name(struct pcc_state **pcc,
1030 const char *pce_name)
1031 {
1032 if (pcc == NULL || pce_name == NULL) {
1033 return NULL;
1034 }
1035
1036 for (int i = 0; i < MAX_PCC; i++) {
1037 if (pcc[i] == NULL) {
1038 continue;
1039 }
1040
1041 if (strcmp(pcc[i]->pce_opts->pce_name, pce_name) == 0) {
1042 return pcc[i];
1043 }
1044 }
1045
1046 return NULL;
1047 }
1048
1049 int pcep_pcc_get_pcc_idx_by_id(struct pcc_state **pcc, int id)
1050 {
1051 if (pcc == NULL) {
1052 return -1;
1053 }
1054
1055 for (int idx = 0; idx < MAX_PCC; idx++) {
1056 if (pcc[idx]) {
1057 if (pcc[idx]->id == id) {
1058 zlog_debug("found pcc_id (%d) array_idx (%d)",
1059 pcc[idx]->id, idx);
1060 return idx;
1061 }
1062 }
1063 }
1064
1065 return -1;
1066 }
1067
1068 int pcep_pcc_get_free_pcc_idx(struct pcc_state **pcc)
1069 {
1070 assert(pcc != NULL);
1071
1072 for (int idx = 0; idx < MAX_PCC; idx++) {
1073 if (pcc[idx] == NULL) {
1074 zlog_debug("new pcc_idx (%d)", idx);
1075 return idx;
1076 }
1077 }
1078
1079 return -1;
1080 }
1081
1082 int pcep_pcc_get_pcc_id(struct pcc_state *pcc)
1083 {
1084 return ((pcc == NULL) ? 0 : pcc->id);
1085 }
1086
1087 void pcep_pcc_copy_pcc_info(struct pcc_state **pcc,
1088 struct pcep_pcc_info *pcc_info)
1089 {
1090 struct pcc_state *pcc_state =
1091 pcep_pcc_get_pcc_by_name(pcc, pcc_info->pce_name);
1092 if (!pcc_state) {
1093 return;
1094 }
1095
1096 pcc_info->ctrl_state = NULL;
1097 pcc_info->msd = pcc_state->pcc_opts->msd;
1098 pcc_info->pcc_port = pcc_state->pcc_opts->port;
1099 pcc_info->next_plspid = pcc_state->next_plspid;
1100 pcc_info->next_reqid = pcc_state->next_reqid;
1101 pcc_info->status = pcc_state->status;
1102 pcc_info->pcc_id = pcc_state->id;
1103 pcc_info->is_best_multi_pce = pcc_state->is_best;
1104 pcc_info->previous_best = pcc_state->previous_best;
1105 pcc_info->precedence =
1106 pcc_state->pce_opts ? pcc_state->pce_opts->precedence : 0;
1107 memcpy(&pcc_info->pcc_addr, &pcc_state->pcc_addr_tr,
1108 sizeof(struct ipaddr));
1109 }
1110
1111
1112 /*------------------ PCEP Message handlers --------------------- */
1113
1114 void handle_pcep_open(struct ctrl_state *ctrl_state,
1115 struct pcc_state *pcc_state, struct pcep_message *msg)
1116 {
1117 assert(msg->msg_header->type == PCEP_TYPE_OPEN);
1118 pcep_lib_parse_capabilities(msg, &pcc_state->caps);
1119 PCEP_DEBUG("PCE capabilities: %s, %s%s",
1120 pcc_state->caps.is_stateful ? "stateful" : "stateless",
1121 pcc_state->caps.supported_ofs_are_known
1122 ? (pcc_state->caps.supported_ofs == 0
1123 ? "no objective functions supported"
1124 : "supported objective functions are ")
1125 : "supported objective functions are unknown",
1126 format_objfun_set(pcc_state->caps.supported_ofs));
1127 }
1128
1129 void handle_pcep_message(struct ctrl_state *ctrl_state,
1130 struct pcc_state *pcc_state, struct pcep_message *msg)
1131 {
1132 if (pcc_state->status != PCEP_PCC_OPERATING)
1133 return;
1134
1135 switch (msg->msg_header->type) {
1136 case PCEP_TYPE_INITIATE:
1137 handle_pcep_lsp_initiate(ctrl_state, pcc_state, msg);
1138 break;
1139 case PCEP_TYPE_UPDATE:
1140 handle_pcep_lsp_update(ctrl_state, pcc_state, msg);
1141 break;
1142 case PCEP_TYPE_PCREP:
1143 handle_pcep_comp_reply(ctrl_state, pcc_state, msg);
1144 break;
1145 default:
1146 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE,
1147 "Unexpected pcep message from pceplib: %s",
1148 format_pcep_message(msg));
1149 break;
1150 }
1151 }
1152
1153 void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
1154 struct pcc_state *pcc_state,
1155 struct pcep_message *msg)
1156 {
1157 char err[MAX_ERROR_MSG_SIZE] = "";
1158 struct path *path;
1159 path = pcep_lib_parse_path(msg);
1160 lookup_nbkey(pcc_state, path);
1161 /* TODO: Investigate if this is safe to do in the controller thread */
1162 path_pcep_config_lookup(path);
1163 specialize_incoming_path(pcc_state, path);
1164 PCEP_DEBUG("%s Received LSP update", pcc_state->tag);
1165 PCEP_DEBUG_PATH("%s", format_path(path));
1166
1167 if (validate_incoming_path(pcc_state, path, err, sizeof(err)))
1168 pcep_thread_update_path(ctrl_state, pcc_state->id, path);
1169 else {
1170 /* FIXME: Monitor the amount of errors from the PCE and
1171 * possibly disconnect and blacklist */
1172 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
1173 "Unsupported PCEP protocol feature: %s", err);
1174 pcep_free_path(path);
1175 }
1176 }
1177
/* Reject a PCInitiate message: PCE-initiated LSPs are not supported,
 * so reply with an "invalid operation" PCEP error. */
void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state,
			      struct pcep_message *msg)
{
	PCEP_DEBUG("%s Received LSP initiate, not supported yet",
		   pcc_state->tag);

	/* TODO when we support both PCC and PCE initiated sessions,
	 * we should first check the session type before
	 * rejecting this message. */
	send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
			PCEP_ERRV_LSP_NOT_PCE_INITIATED);
}
1191
1192 void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
1193 struct pcc_state *pcc_state,
1194 struct pcep_message *msg)
1195 {
1196 char err[MAX_ERROR_MSG_SIZE] = "";
1197 struct req_entry *req;
1198 struct path *path;
1199
1200 path = pcep_lib_parse_path(msg);
1201 req = pop_req(pcc_state, path->req_id);
1202 if (req == NULL) {
1203 /* TODO: check the rate of bad computation reply and close
1204 * the connection if more that a given rate.
1205 */
1206 PCEP_DEBUG(
1207 "%s Received computation reply for unknown request "
1208 "%d",
1209 pcc_state->tag, path->req_id);
1210 PCEP_DEBUG_PATH("%s", format_path(path));
1211 send_pcep_error(pcc_state, PCEP_ERRT_UNKNOWN_REQ_REF,
1212 PCEP_ERRV_UNASSIGNED);
1213 return;
1214 }
1215
1216 /* Cancel the computation request timeout */
1217 pcep_thread_cancel_timer(&req->t_retry);
1218
1219 /* Transfer relevent metadata from the request to the response */
1220 path->nbkey = req->path->nbkey;
1221 path->plsp_id = req->path->plsp_id;
1222 path->type = req->path->type;
1223 path->name = XSTRDUP(MTYPE_PCEP, req->path->name);
1224 specialize_incoming_path(pcc_state, path);
1225
1226 PCEP_DEBUG("%s Received computation reply %d (no-path: %s)",
1227 pcc_state->tag, path->req_id,
1228 path->no_path ? "true" : "false");
1229 PCEP_DEBUG_PATH("%s", format_path(path));
1230
1231 if (path->no_path) {
1232 PCEP_DEBUG("%s Computation for path %s did not find any result",
1233 pcc_state->tag, path->name);
1234 } else if (validate_incoming_path(pcc_state, path, err, sizeof(err))) {
1235 /* Updating a dynamic path will automatically delegate it */
1236 pcep_thread_update_path(ctrl_state, pcc_state->id, path);
1237 free_req_entry(req);
1238 return;
1239 } else {
1240 /* FIXME: Monitor the amount of errors from the PCE and
1241 * possibly disconnect and blacklist */
1242 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
1243 "Unsupported PCEP protocol feature: %s", err);
1244 }
1245
1246 pcep_free_path(path);
1247
1248 /* Delegate the path regardless of the outcome */
1249 /* TODO: For now we are using the path from the request, when
1250 * pathd API is thread safe, we could get a new path */
1251 if (pcc_state->caps.is_stateful) {
1252 PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s",
1253 pcc_state->tag, path->name, pcc_state->originator);
1254 path = pcep_copy_path(req->path);
1255 path->is_delegated = true;
1256 send_report(pcc_state, path);
1257 pcep_free_path(path);
1258 }
1259
1260 free_req_entry(req);
1261 }
1262
1263
1264 /* ------------ Internal Functions ------------ */
1265
/* Human-readable name for the address family of an ipaddr. */
const char *ipaddr_type_name(struct ipaddr *addr)
{
	return IS_IPADDR_V4(addr)   ? "IPv4"
	       : IS_IPADDR_V6(addr) ? "IPv6"
				    : "undefined";
}
1274
1275 bool filter_path(struct pcc_state *pcc_state, struct path *path)
1276 {
1277 return (IS_IPADDR_V4(&path->nbkey.endpoint)
1278 && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4))
1279 || (IS_IPADDR_V6(&path->nbkey.endpoint)
1280 && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
1281 }
1282
1283 void select_pcc_addresses(struct pcc_state *pcc_state)
1284 {
1285 /* If no IPv4 address was specified, try to get one from zebra */
1286 if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
1287 if (get_ipv4_router_id(&pcc_state->pcc_addr_v4)) {
1288 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
1289 }
1290 }
1291
1292 /* If no IPv6 address was specified, try to get one from zebra */
1293 if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
1294 if (get_ipv6_router_id(&pcc_state->pcc_addr_v6)) {
1295 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
1296 }
1297 }
1298 }
1299
1300 void select_transport_address(struct pcc_state *pcc_state)
1301 {
1302 struct ipaddr *taddr = &pcc_state->pcc_addr_tr;
1303
1304 select_pcc_addresses(pcc_state);
1305
1306 taddr->ipa_type = IPADDR_NONE;
1307
1308 /* Select a transport source address in function of the configured PCE
1309 * address */
1310 if (IS_IPADDR_V4(&pcc_state->pce_opts->addr)) {
1311 if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
1312 taddr->ipa_type = IPADDR_V4;
1313 taddr->ipaddr_v4 = pcc_state->pcc_addr_v4;
1314 }
1315 } else {
1316 if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
1317 taddr->ipa_type = IPADDR_V6;
1318 taddr->ipaddr_v6 = pcc_state->pcc_addr_v6;
1319 }
1320 }
1321 }
1322
1323 void update_tag(struct pcc_state *pcc_state)
1324 {
1325 if (pcc_state->pce_opts != NULL) {
1326 assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
1327 if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
1328 snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
1329 "%pI6:%i (%u)",
1330 &pcc_state->pce_opts->addr.ipaddr_v6,
1331 pcc_state->pce_opts->port, pcc_state->id);
1332 } else {
1333 snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
1334 "%pI4:%i (%u)",
1335 &pcc_state->pce_opts->addr.ipaddr_v4,
1336 pcc_state->pce_opts->port, pcc_state->id);
1337 }
1338 } else {
1339 snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), "(%u)",
1340 pcc_state->id);
1341 }
1342 }
1343
1344 void update_originator(struct pcc_state *pcc_state)
1345 {
1346 char *originator;
1347 if (pcc_state->originator != NULL) {
1348 XFREE(MTYPE_PCEP, pcc_state->originator);
1349 pcc_state->originator = NULL;
1350 }
1351 if (pcc_state->pce_opts == NULL)
1352 return;
1353 originator = XCALLOC(MTYPE_PCEP, 52);
1354 assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
1355 if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
1356 snprintfrr(originator, 52, "%pI6:%i",
1357 &pcc_state->pce_opts->addr.ipaddr_v6,
1358 pcc_state->pce_opts->port);
1359 } else {
1360 snprintfrr(originator, 52, "%pI4:%i",
1361 &pcc_state->pce_opts->addr.ipaddr_v4,
1362 pcc_state->pce_opts->port);
1363 }
1364 pcc_state->originator = originator;
1365 }
1366
1367 void schedule_reconnect(struct ctrl_state *ctrl_state,
1368 struct pcc_state *pcc_state)
1369 {
1370 pcc_state->retry_count++;
1371 pcep_thread_schedule_reconnect(ctrl_state, pcc_state->id,
1372 pcc_state->retry_count,
1373 &pcc_state->t_reconnect);
1374 if (pcc_state->retry_count == 1) {
1375 pcep_thread_schedule_sync_best_pce(
1376 ctrl_state, pcc_state->id,
1377 pcc_state->pce_opts->config_opts
1378 .delegation_timeout_seconds,
1379 &pcc_state->t_update_best);
1380 }
1381 }
1382
1383 void schedule_session_timeout(struct ctrl_state *ctrl_state,
1384 struct pcc_state *pcc_state)
1385 {
1386 /* No need to schedule timeout if multiple PCEs are connected */
1387 if (get_pce_count_connected(ctrl_state->pcc)) {
1388 PCEP_DEBUG_PCEP(
1389 "schedule_session_timeout not setting timer for multi-pce mode");
1390
1391 return;
1392 }
1393
1394 pcep_thread_schedule_session_timeout(
1395 ctrl_state, pcep_pcc_get_pcc_id(pcc_state),
1396 pcc_state->pce_opts->config_opts
1397 .session_timeout_inteval_seconds,
1398 &pcc_state->t_session_timeout);
1399 }
1400
/* Cancel the pending session timeout timer, if any. */
void cancel_session_timeout(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state)
{
	/* Nothing to cancel when no session timeout timer is armed
	 * (the previous comment here was copy-pasted from
	 * schedule_session_timeout and did not match the code) */
	if (pcc_state->t_session_timeout == NULL) {
		PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL");
		return;
	}

	PCEP_DEBUG_PCEP("Cancel session_timeout timer");
	pcep_thread_cancel_timer(&pcc_state->t_session_timeout);
	pcc_state->t_session_timeout = NULL;
}
1414
1415 void send_pcep_message(struct pcc_state *pcc_state, struct pcep_message *msg)
1416 {
1417 if (pcc_state->sess != NULL) {
1418 PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state->tag,
1419 format_pcep_message(msg));
1420 send_message(pcc_state->sess, msg, true);
1421 }
1422 }
1423
1424 void send_pcep_error(struct pcc_state *pcc_state,
1425 enum pcep_error_type error_type,
1426 enum pcep_error_value error_value)
1427 {
1428 struct pcep_message *msg;
1429 PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)",
1430 pcc_state->tag, pcep_error_type_name(error_type), error_type,
1431 pcep_error_value_name(error_type, error_value), error_value);
1432 msg = pcep_lib_format_error(error_type, error_value);
1433 send_pcep_message(pcc_state, msg);
1434 }
1435
1436 void send_report(struct pcc_state *pcc_state, struct path *path)
1437 {
1438 struct pcep_message *report;
1439
1440 path->req_id = 0;
1441 specialize_outgoing_path(pcc_state, path);
1442 PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state->tag, path->name,
1443 format_path(path));
1444 report = pcep_lib_format_report(&pcc_state->caps, path);
1445 send_pcep_message(pcc_state, report);
1446 }
1447
1448 /* Updates the path for the PCE, updating the delegation and creation flags */
1449 void specialize_outgoing_path(struct pcc_state *pcc_state, struct path *path)
1450 {
1451 bool is_delegated = false;
1452 bool was_created = false;
1453
1454 lookup_plspid(pcc_state, path);
1455
1456 set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
1457 path->sender = pcc_state->pcc_addr_tr;
1458
1459 /* TODO: When the pathd API have a way to mark a path as
1460 * delegated, use it instead of considering all dynamic path
1461 * delegated. We need to disable the originator check for now,
1462 * because path could be delegated without having any originator yet */
1463 // if ((path->originator == NULL)
1464 // || (strcmp(path->originator, pcc_state->originator) == 0)) {
1465 // is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
1466 // && (path->first_hop != NULL);
1467 // /* it seems the PCE consider updating an LSP a creation ?!?
1468 // at least Cisco does... */
1469 // was_created = path->update_origin == SRTE_ORIGIN_PCEP;
1470 // }
1471 is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC);
1472 was_created = path->update_origin == SRTE_ORIGIN_PCEP;
1473
1474 path->pcc_id = pcc_state->id;
1475 path->go_active = is_delegated && pcc_state->is_best;
1476 path->is_delegated = is_delegated && pcc_state->is_best;
1477 path->was_created = was_created;
1478 }
1479
1480 /* Updates the path for the PCC */
1481 void specialize_incoming_path(struct pcc_state *pcc_state, struct path *path)
1482 {
1483 set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
1484 path->sender = pcc_state->pce_opts->addr;
1485 path->pcc_id = pcc_state->id;
1486 path->update_origin = SRTE_ORIGIN_PCEP;
1487 path->originator = XSTRDUP(MTYPE_PCEP, pcc_state->originator);
1488 }
1489
1490 /* Ensure the path can be handled by the PCC and if not, sends an error */
1491 bool validate_incoming_path(struct pcc_state *pcc_state, struct path *path,
1492 char *errbuff, size_t buffsize)
1493 {
1494 struct path_hop *hop;
1495 enum pcep_error_type err_type = 0;
1496 enum pcep_error_value err_value = PCEP_ERRV_UNASSIGNED;
1497
1498 for (hop = path->first_hop; hop != NULL; hop = hop->next) {
1499 /* Hops without SID are not supported */
1500 if (!hop->has_sid) {
1501 snprintfrr(errbuff, buffsize, "SR segment without SID");
1502 err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
1503 err_value = PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING;
1504 break;
1505 }
1506 /* Hops with non-MPLS SID are not supported */
1507 if (!hop->is_mpls) {
1508 snprintfrr(errbuff, buffsize,
1509 "SR segment with non-MPLS SID");
1510 err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
1511 err_value = PCEP_ERRV_UNSUPPORTED_NAI;
1512 break;
1513 }
1514 }
1515
1516 if (err_type != 0) {
1517 send_pcep_error(pcc_state, err_type, err_value);
1518 return false;
1519 }
1520
1521 return true;
1522 }
1523
1524 void send_comp_request(struct ctrl_state *ctrl_state,
1525 struct pcc_state *pcc_state, struct req_entry *req)
1526 {
1527 assert(req != NULL);
1528
1529 if (req->t_retry)
1530 return;
1531
1532 assert(req->path != NULL);
1533 assert(req->path->req_id > 0);
1534 assert(RB_FIND(req_entry_head, &pcc_state->requests, req) == req);
1535 assert(lookup_reqid(pcc_state, req->path) == req->path->req_id);
1536
1537 int timeout;
1538 char buff[40];
1539 struct pcep_message *msg;
1540
1541 if (!pcc_state->is_best) {
1542 return;
1543 }
1544 /* TODO: Add a timer to retry the computation request ? */
1545
1546 specialize_outgoing_path(pcc_state, req->path);
1547
1548 PCEP_DEBUG(
1549 "%s Sending computation request %d for path %s to %s (retry %d)",
1550 pcc_state->tag, req->path->req_id, req->path->name,
1551 ipaddr2str(&req->path->nbkey.endpoint, buff, sizeof(buff)),
1552 req->retry_count);
1553 PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state->tag,
1554 req->path->name, format_path(req->path));
1555
1556 msg = pcep_lib_format_request(&pcc_state->caps, req->path);
1557 send_pcep_message(pcc_state, msg);
1558 req->was_sent = true;
1559
1560 /* TODO: Enable this back when the pcep config changes are merged back
1561 */
1562 // timeout = pcc_state->pce_opts->config_opts.pcep_request_time_seconds;
1563 timeout = 30;
1564 pcep_thread_schedule_timeout(ctrl_state, pcc_state->id,
1565 TO_COMPUTATION_REQUEST, timeout,
1566 (void *)req, &req->t_retry);
1567 }
1568
1569 void cancel_comp_requests(struct ctrl_state *ctrl_state,
1570 struct pcc_state *pcc_state)
1571 {
1572 struct req_entry *req, *safe_req;
1573
1574 RB_FOREACH_SAFE (req, req_entry_head, &pcc_state->requests, safe_req) {
1575 cancel_comp_request(ctrl_state, pcc_state, req);
1576 RB_REMOVE(req_entry_head, &pcc_state->requests, req);
1577 remove_reqid_mapping(pcc_state, req->path);
1578 free_req_entry(req);
1579 }
1580 }
1581
1582 void cancel_comp_request(struct ctrl_state *ctrl_state,
1583 struct pcc_state *pcc_state, struct req_entry *req)
1584 {
1585 char buff[40];
1586 struct pcep_message *msg;
1587
1588 if (req->was_sent) {
1589 /* TODO: Send a computation request cancelation
1590 * notification to the PCE */
1591 pcep_thread_cancel_timer(&req->t_retry);
1592 }
1593
1594 PCEP_DEBUG(
1595 "%s Canceling computation request %d for path %s to %s (retry %d)",
1596 pcc_state->tag, req->path->req_id, req->path->name,
1597 ipaddr2str(&req->path->nbkey.endpoint, buff, sizeof(buff)),
1598 req->retry_count);
1599 PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s",
1600 pcc_state->tag, req->path->name,
1601 format_path(req->path));
1602
1603 msg = pcep_lib_format_request_cancelled(req->path->req_id);
1604 send_pcep_message(pcc_state, msg);
1605 }
1606
1607 void set_pcc_address(struct pcc_state *pcc_state, struct lsp_nb_key *nbkey,
1608 struct ipaddr *addr)
1609 {
1610 select_pcc_addresses(pcc_state);
1611 if (IS_IPADDR_V6(&nbkey->endpoint)) {
1612 assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
1613 addr->ipa_type = IPADDR_V6;
1614 addr->ipaddr_v6 = pcc_state->pcc_addr_v6;
1615 } else if (IS_IPADDR_V4(&nbkey->endpoint)) {
1616 assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4));
1617 addr->ipa_type = IPADDR_V4;
1618 addr->ipaddr_v4 = pcc_state->pcc_addr_v4;
1619 } else {
1620 addr->ipa_type = IPADDR_NONE;
1621 }
1622 }
1623
1624
1625 /* ------------ Data Structure Helper Functions ------------ */
1626
/*
 * Resolve (or allocate) the PLSP-ID for the given path.
 *
 * Maintains two mirror maps kept in sync: nbkey -> plspid and
 * plspid -> nbkey. Only paths with a non-zero color are assigned an id.
 */
void lookup_plspid(struct pcc_state *pcc_state, struct path *path)
{
	struct plspid_map_data key, *plspid_mapping;
	struct nbkey_map_data *nbkey_mapping;

	if (path->nbkey.color != 0) {
		key.nbkey = path->nbkey;
		plspid_mapping = plspid_map_find(&pcc_state->plspid_map, &key);
		if (plspid_mapping == NULL) {
			/* First time this nbkey is seen: allocate the next
			 * PLSP-ID and record the forward mapping... */
			plspid_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*plspid_mapping));
			plspid_mapping->nbkey = key.nbkey;
			plspid_mapping->plspid = pcc_state->next_plspid;
			plspid_map_add(&pcc_state->plspid_map, plspid_mapping);
			/* ...and the reverse (plspid -> nbkey) mapping */
			nbkey_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*nbkey_mapping));
			nbkey_mapping->nbkey = key.nbkey;
			nbkey_mapping->plspid = pcc_state->next_plspid;
			nbkey_map_add(&pcc_state->nbkey_map, nbkey_mapping);
			pcc_state->next_plspid++;
			// FIXME: Send some error to the PCE instead of crashing
			/* 1048576 == 2^20 — presumably the 20-bit PLSP-ID
			 * limit from RFC 8231; confirm */
			assert(pcc_state->next_plspid <= 1048576);
		}
		path->plsp_id = plspid_mapping->plspid;
	}
}
1653
1654 void lookup_nbkey(struct pcc_state *pcc_state, struct path *path)
1655 {
1656 struct nbkey_map_data key, *mapping;
1657 // TODO: Should give an error to the PCE instead of crashing
1658 assert(path->plsp_id != 0);
1659 key.plspid = path->plsp_id;
1660 mapping = nbkey_map_find(&pcc_state->nbkey_map, &key);
1661 assert(mapping != NULL);
1662 path->nbkey = mapping->nbkey;
1663 }
1664
/* Release a request entry together with the path copy it owns. */
void free_req_entry(struct req_entry *req)
{
	pcep_free_path(req->path);
	XFREE(MTYPE_PCEP, req);
}
1670
1671 struct req_entry *push_new_req(struct pcc_state *pcc_state, struct path *path)
1672 {
1673 struct req_entry *req;
1674
1675 req = XCALLOC(MTYPE_PCEP, sizeof(*req));
1676 req->retry_count = 0;
1677 req->path = pcep_copy_path(path);
1678 repush_req(pcc_state, req);
1679
1680 return req;
1681 }
1682
1683 void repush_req(struct pcc_state *pcc_state, struct req_entry *req)
1684 {
1685 uint32_t reqid = pcc_state->next_reqid;
1686 void *res;
1687
1688 req->was_sent = false;
1689 req->path->req_id = reqid;
1690 res = RB_INSERT(req_entry_head, &pcc_state->requests, req);
1691 assert(res == NULL);
1692 assert(add_reqid_mapping(pcc_state, req->path) == true);
1693
1694 pcc_state->next_reqid += 1;
1695 /* Wrapping is allowed, but 0 is not a valid id */
1696 if (pcc_state->next_reqid == 0)
1697 pcc_state->next_reqid = 1;
1698 }
1699
1700 struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid)
1701 {
1702 struct path path = {.req_id = reqid};
1703 struct req_entry key = {.path = &path};
1704 struct req_entry *req;
1705
1706 req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
1707 if (req == NULL)
1708 return NULL;
1709 RB_REMOVE(req_entry_head, &pcc_state->requests, req);
1710 remove_reqid_mapping(pcc_state, req->path);
1711
1712 return req;
1713 }
1714
1715 bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
1716 {
1717 struct req_map_data *mapping;
1718 mapping = XCALLOC(MTYPE_PCEP, sizeof(*mapping));
1719 mapping->nbkey = path->nbkey;
1720 mapping->reqid = path->req_id;
1721 if (req_map_add(&pcc_state->req_map, mapping) != NULL) {
1722 XFREE(MTYPE_PCEP, mapping);
1723 return false;
1724 }
1725 return true;
1726 }
1727
1728 void remove_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
1729 {
1730 struct req_map_data key, *mapping;
1731 key.nbkey = path->nbkey;
1732 mapping = req_map_find(&pcc_state->req_map, &key);
1733 if (mapping != NULL) {
1734 req_map_del(&pcc_state->req_map, mapping);
1735 XFREE(MTYPE_PCEP, mapping);
1736 }
1737 }
1738
1739 uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path)
1740 {
1741 struct req_map_data key, *mapping;
1742 key.nbkey = path->nbkey;
1743 mapping = req_map_find(&pcc_state->req_map, &key);
1744 if (mapping != NULL)
1745 return mapping->reqid;
1746 return 0;
1747 }
1748
/* True when a computation request is already pending for this path
 * (a request id of 0 means "no pending request"). */
bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path)
{
	return lookup_reqid(pcc_state, path) != 0;
}
1753
1754
1755 /* ------------ Data Structure Callbacks ------------ */
1756
/*
 * Compare two ordered values and return -1/1 from the enclosing
 * function when they differ; fall through when equal.
 *
 * Wrapped in do { } while (0) so it behaves as one statement (the bare
 * `if` without an else created a dangling-else hazard at call sites),
 * with every argument use parenthesized. Note A and B are evaluated
 * more than once — pass side-effect-free expressions only.
 */
#define CMP_RETURN(A, B)                                                       \
	do {                                                                   \
		if ((A) != (B))                                                \
			return ((A) < (B)) ? -1 : 1;                           \
	} while (0)
1760
1761 static uint32_t hash_nbkey(const struct lsp_nb_key *nbkey)
1762 {
1763 uint32_t hash;
1764 hash = jhash_2words(nbkey->color, nbkey->preference, 0x55aa5a5a);
1765 switch (nbkey->endpoint.ipa_type) {
1766 case IPADDR_V4:
1767 return jhash(&nbkey->endpoint.ipaddr_v4,
1768 sizeof(nbkey->endpoint.ipaddr_v4), hash);
1769 case IPADDR_V6:
1770 return jhash(&nbkey->endpoint.ipaddr_v6,
1771 sizeof(nbkey->endpoint.ipaddr_v6), hash);
1772 default:
1773 return hash;
1774 }
1775 }
1776
1777 static int cmp_nbkey(const struct lsp_nb_key *a, const struct lsp_nb_key *b)
1778 {
1779 CMP_RETURN(a->color, b->color);
1780 int cmp = ipaddr_cmp(&a->endpoint, &b->endpoint);
1781 if (cmp != 0)
1782 return cmp;
1783 CMP_RETURN(a->preference, b->preference);
1784 return 0;
1785 }
1786
/* Ordering callback for the plspid map: compare by northbound key. */
int plspid_map_cmp(const struct plspid_map_data *a,
		   const struct plspid_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
1792
/* Hash callback for the plspid map: hash the northbound key. */
uint32_t plspid_map_hash(const struct plspid_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}
1797
1798 int nbkey_map_cmp(const struct nbkey_map_data *a,
1799 const struct nbkey_map_data *b)
1800 {
1801 CMP_RETURN(a->plspid, b->plspid);
1802 return 0;
1803 }
1804
/* Hash callback for the nbkey map: the PLSP-ID is its own hash. */
uint32_t nbkey_map_hash(const struct nbkey_map_data *e)
{
	return e->plspid;
}
1809
/* Ordering callback for the request map: compare by northbound key. */
int req_map_cmp(const struct req_map_data *a, const struct req_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
1814
/* Hash callback for the request map: hash the northbound key. */
uint32_t req_map_hash(const struct req_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}