2 * Copyright (C) 2020 NetDEF, Inc.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along
15 * with this program; see the file COPYING; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 /* TODOS AND KNOWN ISSUES:
20    - Delete mapping from NB keys to PLSPID when an LSP is deleted either
      by the PCE or through local configuration.
22 - Revert the hacks to work around ODL requiring a report with
23 operational status DOWN when an LSP is activated.
24 - Enforce only the PCE a policy has been delegated to can update it.
25 - If the router-id is used because the PCC IP is not specified
26 (either IPv4 or IPv6), the connection to the PCE is not reset
27 when the router-id changes.
37 #include "northbound.h"
38 #include "frr_pthread.h"
41 #include "pathd/pathd.h"
42 #include "pathd/path_zebra.h"
43 #include "pathd/path_errors.h"
44 #include "pathd/path_pcep_memory.h"
45 #include "pathd/path_pcep.h"
46 #include "pathd/path_pcep_controller.h"
47 #include "pathd/path_pcep_lib.h"
48 #include "pathd/path_pcep_config.h"
49 #include "pathd/path_pcep_debug.h"
52 /* The number of times we will skip connecting if we are missing the PCC
53  * address for an inet family different from the selected transport one */
54 #define OTHER_FAMILY_MAX_RETRIES 4
55 #define MAX_ERROR_MSG_SIZE 256
56 #define MAX_COMPREQ_TRIES 3
59 /* PCEP Event Handler */
60 static void handle_pcep_open(struct ctrl_state
*ctrl_state
,
61 struct pcc_state
*pcc_state
,
62 struct pcep_message
*msg
);
63 static void handle_pcep_message(struct ctrl_state
*ctrl_state
,
64 struct pcc_state
*pcc_state
,
65 struct pcep_message
*msg
);
66 static void handle_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
67 struct pcc_state
*pcc_state
,
68 struct pcep_message
*msg
);
69 static void handle_pcep_lsp_initiate(struct ctrl_state
*ctrl_state
,
70 struct pcc_state
*pcc_state
,
71 struct pcep_message
*msg
);
72 static void handle_pcep_comp_reply(struct ctrl_state
*ctrl_state
,
73 struct pcc_state
*pcc_state
,
74 struct pcep_message
*msg
);
76 /* Internal Functions */
77 static const char *ipaddr_type_name(struct ipaddr
*addr
);
78 static bool filter_path(struct pcc_state
*pcc_state
, struct path
*path
);
79 static void select_pcc_addresses(struct pcc_state
*pcc_state
);
80 static void select_transport_address(struct pcc_state
*pcc_state
);
81 static void update_tag(struct pcc_state
*pcc_state
);
82 static void update_originator(struct pcc_state
*pcc_state
);
83 static void schedule_reconnect(struct ctrl_state
*ctrl_state
,
84 struct pcc_state
*pcc_state
);
85 static void schedule_session_timeout(struct ctrl_state
*ctrl_state
,
86 struct pcc_state
*pcc_state
);
87 static void cancel_session_timeout(struct ctrl_state
*ctrl_state
,
88 struct pcc_state
*pcc_state
);
89 static void send_pcep_message(struct pcc_state
*pcc_state
,
90 struct pcep_message
*msg
);
91 static void send_pcep_error(struct pcc_state
*pcc_state
,
92 enum pcep_error_type error_type
,
93 enum pcep_error_value error_value
);
94 static void send_report(struct pcc_state
*pcc_state
, struct path
*path
);
95 static void send_comp_request(struct ctrl_state
*ctrl_state
,
96 struct pcc_state
*pcc_state
,
97 struct req_entry
*req
);
98 static void cancel_comp_requests(struct ctrl_state
*ctrl_state
,
99 struct pcc_state
*pcc_state
);
100 static void cancel_comp_request(struct ctrl_state
*ctrl_state
,
101 struct pcc_state
*pcc_state
,
102 struct req_entry
*req
);
103 static void specialize_outgoing_path(struct pcc_state
*pcc_state
,
105 static void specialize_incoming_path(struct pcc_state
*pcc_state
,
107 static bool validate_incoming_path(struct pcc_state
*pcc_state
,
108 struct path
*path
, char *errbuff
,
110 static void set_pcc_address(struct pcc_state
*pcc_state
,
111 struct lsp_nb_key
*nbkey
, struct ipaddr
*addr
);
112 static int compare_pcc_opts(struct pcc_opts
*lhs
, struct pcc_opts
*rhs
);
113 static int compare_pce_opts(struct pce_opts
*lhs
, struct pce_opts
*rhs
);
114 static int get_previous_best_pce(struct pcc_state
**pcc
);
115 static int get_best_pce(struct pcc_state
**pcc
);
116 static int get_pce_count_connected(struct pcc_state
**pcc
);
117 static bool update_best_pce(struct pcc_state
**pcc
, int best
);
119 /* Data Structure Helper Functions */
120 static void lookup_plspid(struct pcc_state
*pcc_state
, struct path
*path
);
121 static void lookup_nbkey(struct pcc_state
*pcc_state
, struct path
*path
);
122 static void free_req_entry(struct req_entry
*req
);
123 static struct req_entry
*push_new_req(struct pcc_state
*pcc_state
,
125 static void repush_req(struct pcc_state
*pcc_state
, struct req_entry
*req
);
126 static struct req_entry
*pop_req(struct pcc_state
*pcc_state
, uint32_t reqid
);
127 static bool add_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
);
128 static void remove_reqid_mapping(struct pcc_state
*pcc_state
,
130 static uint32_t lookup_reqid(struct pcc_state
*pcc_state
, struct path
*path
);
131 static bool has_pending_req_for(struct pcc_state
*pcc_state
, struct path
*path
);
133 /* Data Structure Callbacks */
134 static int plspid_map_cmp(const struct plspid_map_data
*a
,
135 const struct plspid_map_data
*b
);
136 static uint32_t plspid_map_hash(const struct plspid_map_data
*e
);
137 static int nbkey_map_cmp(const struct nbkey_map_data
*a
,
138 const struct nbkey_map_data
*b
);
139 static uint32_t nbkey_map_hash(const struct nbkey_map_data
*e
);
140 static int req_map_cmp(const struct req_map_data
*a
,
141 const struct req_map_data
*b
);
142 static uint32_t req_map_hash(const struct req_map_data
*e
);
144 /* Data Structure Declarations */
145 DECLARE_HASH(plspid_map
, struct plspid_map_data
, mi
, plspid_map_cmp
,
147 DECLARE_HASH(nbkey_map
, struct nbkey_map_data
, mi
, nbkey_map_cmp
,
149 DECLARE_HASH(req_map
, struct req_map_data
, mi
, req_map_cmp
, req_map_hash
)
151 static inline int req_entry_compare(const struct req_entry
*a
,
152 const struct req_entry
*b
)
154 return a
->path
->req_id
- b
->path
->req_id
;
156 RB_GENERATE(req_entry_head
, req_entry
, entry
, req_entry_compare
)
159 /* ------------ API Functions ------------ */
161 struct pcc_state
*pcep_pcc_initialize(struct ctrl_state
*ctrl_state
, int index
)
163 struct pcc_state
*pcc_state
= XCALLOC(MTYPE_PCEP
, sizeof(*pcc_state
));
165 pcc_state
->id
= index
;
166 pcc_state
->status
= PCEP_PCC_DISCONNECTED
;
167 pcc_state
->next_reqid
= 1;
168 pcc_state
->next_plspid
= 1;
170 RB_INIT(req_entry_head
, &pcc_state
->requests
);
172 update_tag(pcc_state
);
173 update_originator(pcc_state
);
175 PCEP_DEBUG("%s PCC initialized", pcc_state
->tag
);
180 void pcep_pcc_finalize(struct ctrl_state
*ctrl_state
,
181 struct pcc_state
*pcc_state
)
183 PCEP_DEBUG("%s PCC finalizing...", pcc_state
->tag
);
185 pcep_pcc_disable(ctrl_state
, pcc_state
);
187 if (pcc_state
->pcc_opts
!= NULL
) {
188 XFREE(MTYPE_PCEP
, pcc_state
->pcc_opts
);
189 pcc_state
->pcc_opts
= NULL
;
191 if (pcc_state
->pce_opts
!= NULL
) {
192 XFREE(MTYPE_PCEP
, pcc_state
->pce_opts
);
193 pcc_state
->pce_opts
= NULL
;
195 if (pcc_state
->originator
!= NULL
) {
196 XFREE(MTYPE_PCEP
, pcc_state
->originator
);
197 pcc_state
->originator
= NULL
;
200 if (pcc_state
->t_reconnect
!= NULL
) {
201 thread_cancel(&pcc_state
->t_reconnect
);
202 pcc_state
->t_reconnect
= NULL
;
205 if (pcc_state
->t_update_best
!= NULL
) {
206 thread_cancel(&pcc_state
->t_update_best
);
207 pcc_state
->t_update_best
= NULL
;
210 if (pcc_state
->t_session_timeout
!= NULL
) {
211 thread_cancel(&pcc_state
->t_session_timeout
);
212 pcc_state
->t_session_timeout
= NULL
;
215 XFREE(MTYPE_PCEP
, pcc_state
);
218 int compare_pcc_opts(struct pcc_opts
*lhs
, struct pcc_opts
*rhs
)
230 retval
= lhs
->port
- rhs
->port
;
235 retval
= lhs
->msd
- rhs
->msd
;
240 if (IS_IPADDR_V4(&lhs
->addr
)) {
241 retval
= memcmp(&lhs
->addr
.ipaddr_v4
, &rhs
->addr
.ipaddr_v4
,
242 sizeof(lhs
->addr
.ipaddr_v4
));
246 } else if (IS_IPADDR_V6(&lhs
->addr
)) {
247 retval
= memcmp(&lhs
->addr
.ipaddr_v6
, &rhs
->addr
.ipaddr_v6
,
248 sizeof(lhs
->addr
.ipaddr_v6
));
257 int compare_pce_opts(struct pce_opts
*lhs
, struct pce_opts
*rhs
)
267 int retval
= lhs
->port
- rhs
->port
;
272 retval
= strcmp(lhs
->pce_name
, rhs
->pce_name
);
277 retval
= lhs
->precedence
- rhs
->precedence
;
282 retval
= memcmp(&lhs
->addr
, &rhs
->addr
, sizeof(lhs
->addr
));
290 int pcep_pcc_update(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
,
291 struct pcc_opts
*pcc_opts
, struct pce_opts
*pce_opts
)
295 // If the options did not change, then there is nothing to do
296 if ((compare_pce_opts(pce_opts
, pcc_state
->pce_opts
) == 0)
297 && (compare_pcc_opts(pcc_opts
, pcc_state
->pcc_opts
) == 0)) {
301 if ((ret
= pcep_pcc_disable(ctrl_state
, pcc_state
))) {
302 XFREE(MTYPE_PCEP
, pcc_opts
);
303 XFREE(MTYPE_PCEP
, pce_opts
);
307 if (pcc_state
->pcc_opts
!= NULL
) {
308 XFREE(MTYPE_PCEP
, pcc_state
->pcc_opts
);
310 if (pcc_state
->pce_opts
!= NULL
) {
311 XFREE(MTYPE_PCEP
, pcc_state
->pce_opts
);
314 pcc_state
->pcc_opts
= pcc_opts
;
315 pcc_state
->pce_opts
= pce_opts
;
317 if (IS_IPADDR_V4(&pcc_opts
->addr
)) {
318 pcc_state
->pcc_addr_v4
= pcc_opts
->addr
.ipaddr_v4
;
319 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
321 UNSET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
324 if (IS_IPADDR_V6(&pcc_opts
->addr
)) {
325 memcpy(&pcc_state
->pcc_addr_v6
, &pcc_opts
->addr
.ipaddr_v6
,
326 sizeof(struct in6_addr
));
327 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
329 UNSET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
332 update_tag(pcc_state
);
333 update_originator(pcc_state
);
335 return pcep_pcc_enable(ctrl_state
, pcc_state
);
338 void pcep_pcc_reconnect(struct ctrl_state
*ctrl_state
,
339 struct pcc_state
*pcc_state
)
341 if (pcc_state
->status
== PCEP_PCC_DISCONNECTED
)
342 pcep_pcc_enable(ctrl_state
, pcc_state
);
345 int pcep_pcc_enable(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
)
350 assert(pcc_state
->status
== PCEP_PCC_DISCONNECTED
);
351 assert(pcc_state
->sess
== NULL
);
353 if (pcc_state
->t_reconnect
!= NULL
) {
354 thread_cancel(&pcc_state
->t_reconnect
);
355 pcc_state
->t_reconnect
= NULL
;
358 select_transport_address(pcc_state
);
360 /* Even though we are connecting using IPv6. we want to have an IPv4
361 * address so we can handle candidate path with IPv4 endpoints */
362 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
)) {
363 if (pcc_state
->retry_count
< OTHER_FAMILY_MAX_RETRIES
) {
364 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
365 "skipping connection to PCE %s:%d due to "
366 "missing PCC IPv4 address",
367 ipaddr2str(&pcc_state
->pce_opts
->addr
,
368 pce_buff
, sizeof(pce_buff
)),
369 pcc_state
->pce_opts
->port
);
370 schedule_reconnect(ctrl_state
, pcc_state
);
373 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
374 "missing IPv4 PCC address, IPv4 candidate "
375 "paths will be ignored");
379 /* Even though we are connecting using IPv4. we want to have an IPv6
380 * address so we can handle candidate path with IPv6 endpoints */
381 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
)) {
382 if (pcc_state
->retry_count
< OTHER_FAMILY_MAX_RETRIES
) {
383 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
384 "skipping connection to PCE %s:%d due to "
385 "missing PCC IPv6 address",
386 ipaddr2str(&pcc_state
->pce_opts
->addr
,
387 pce_buff
, sizeof(pce_buff
)),
388 pcc_state
->pce_opts
->port
);
389 schedule_reconnect(ctrl_state
, pcc_state
);
392 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
393 "missing IPv6 PCC address, IPv6 candidate "
394 "paths will be ignored");
398 /* Even if the maximum retries to try to have all the family addresses
399  * have been spent, we still need the one for the transport family */
400 if (pcc_state
->pcc_addr_tr
.ipa_type
== IPADDR_NONE
) {
401 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
402 "skipping connection to PCE %s:%d due to missing "
404 ipaddr2str(&pcc_state
->pce_opts
->addr
, pce_buff
,
406 pcc_state
->pce_opts
->port
);
407 schedule_reconnect(ctrl_state
, pcc_state
);
411 PCEP_DEBUG("%s PCC connecting", pcc_state
->tag
);
412 pcc_state
->sess
= pcep_lib_connect(
413 &pcc_state
->pcc_addr_tr
, pcc_state
->pcc_opts
->port
,
414 &pcc_state
->pce_opts
->addr
, pcc_state
->pce_opts
->port
,
415 pcc_state
->pcc_opts
->msd
, &pcc_state
->pce_opts
->config_opts
);
417 if (pcc_state
->sess
== NULL
) {
418 flog_warn(EC_PATH_PCEP_LIB_CONNECT
,
419 "failed to connect to PCE %s:%d from %s:%d",
420 ipaddr2str(&pcc_state
->pce_opts
->addr
, pce_buff
,
422 pcc_state
->pce_opts
->port
,
423 ipaddr2str(&pcc_state
->pcc_addr_tr
, pcc_buff
,
425 pcc_state
->pcc_opts
->port
);
426 schedule_reconnect(ctrl_state
, pcc_state
);
430 // In case some best pce alternative were waiting to activate
431 if (pcc_state
->t_update_best
!= NULL
) {
432 thread_cancel(&pcc_state
->t_update_best
);
433 pcc_state
->t_update_best
= NULL
;
436 pcc_state
->status
= PCEP_PCC_CONNECTING
;
441 int pcep_pcc_disable(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
)
443 switch (pcc_state
->status
) {
444 case PCEP_PCC_DISCONNECTED
:
446 case PCEP_PCC_CONNECTING
:
447 case PCEP_PCC_SYNCHRONIZING
:
448 case PCEP_PCC_OPERATING
:
449 PCEP_DEBUG("%s Disconnecting PCC...", pcc_state
->tag
);
450 cancel_comp_requests(ctrl_state
, pcc_state
);
451 pcep_lib_disconnect(pcc_state
->sess
);
452 /* No need to remove if any PCEs is connected */
453 if (get_pce_count_connected(ctrl_state
->pcc
) == 0) {
454 pcep_thread_remove_candidate_path_segments(ctrl_state
,
457 pcc_state
->sess
= NULL
;
458 pcc_state
->status
= PCEP_PCC_DISCONNECTED
;
465 void pcep_pcc_sync_path(struct ctrl_state
*ctrl_state
,
466 struct pcc_state
*pcc_state
, struct path
*path
)
468 if (pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
) {
469 path
->is_synching
= true;
470 } else if (pcc_state
->status
== PCEP_PCC_OPERATING
)
471 path
->is_synching
= false;
475 path
->go_active
= true;
477 /* Accumulate the dynamic paths without any LSP so computation
478 * requests can be performed after synchronization */
479 if ((path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
)
480 && (path
->first_hop
== NULL
)
481 && !has_pending_req_for(pcc_state
, path
)) {
482 PCEP_DEBUG("%s Scheduling computation request for path %s",
483 pcc_state
->tag
, path
->name
);
484 push_new_req(pcc_state
, path
);
488 /* Synchronize the path if the PCE supports LSP updates and the
489  * endpoint address family is supported */
490 if (pcc_state
->caps
.is_stateful
) {
491 if (filter_path(pcc_state
, path
)) {
492 PCEP_DEBUG("%s Synchronizing path %s", pcc_state
->tag
,
494 send_report(pcc_state
, path
);
497 "%s Skipping %s candidate path %s "
500 ipaddr_type_name(&path
->nbkey
.endpoint
),
506 void pcep_pcc_sync_done(struct ctrl_state
*ctrl_state
,
507 struct pcc_state
*pcc_state
)
509 struct req_entry
*req
;
511 if (pcc_state
->status
!= PCEP_PCC_SYNCHRONIZING
512 && pcc_state
->status
!= PCEP_PCC_OPERATING
)
515 if (pcc_state
->caps
.is_stateful
516 && pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
) {
517 struct path
*path
= pcep_new_path();
518 *path
= (struct path
){.name
= NULL
,
521 .status
= PCEP_LSP_OPERATIONAL_DOWN
,
524 .was_created
= false,
525 .was_removed
= false,
526 .is_synching
= false,
527 .is_delegated
= false,
529 .first_metric
= NULL
};
530 send_report(pcc_state
, path
);
531 pcep_free_path(path
);
534 pcc_state
->synchronized
= true;
535 pcc_state
->status
= PCEP_PCC_OPERATING
;
537 PCEP_DEBUG("%s Synchronization done", pcc_state
->tag
);
539 /* Start the computation request accumulated during synchronization */
540 RB_FOREACH (req
, req_entry_head
, &pcc_state
->requests
) {
541 send_comp_request(ctrl_state
, pcc_state
, req
);
545 void pcep_pcc_send_report(struct ctrl_state
*ctrl_state
,
546 struct pcc_state
*pcc_state
, struct path
*path
)
548 if (pcc_state
->status
!= PCEP_PCC_OPERATING
)
551 if (pcc_state
->caps
.is_stateful
) {
552 PCEP_DEBUG("%s Send report for candidate path %s",
553 pcc_state
->tag
, path
->name
);
554 send_report(pcc_state
, path
);
558 /* ------------ Timeout handler ------------ */
560 void pcep_pcc_timeout_handler(struct ctrl_state
*ctrl_state
,
561 struct pcc_state
*pcc_state
,
562 enum pcep_ctrl_timer_type type
, void *param
)
564 struct req_entry
*req
;
567 case TO_COMPUTATION_REQUEST
:
568 assert(param
!= NULL
);
569 req
= (struct req_entry
*)param
;
570 pop_req(pcc_state
, req
->path
->req_id
);
571 flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT
,
572 "Computation request %d timeout", req
->path
->req_id
);
573 cancel_comp_request(ctrl_state
, pcc_state
, req
);
574 if (req
->retry_count
++ < MAX_COMPREQ_TRIES
) {
575 repush_req(pcc_state
, req
);
576 send_comp_request(ctrl_state
, pcc_state
, req
);
579 if (pcc_state
->caps
.is_stateful
) {
582 "%s Delegating undefined dynamic path %s to PCE %s",
583 pcc_state
->tag
, req
->path
->name
,
584 pcc_state
->originator
);
585 path
= pcep_copy_path(req
->path
);
586 path
->is_delegated
= true;
587 send_report(pcc_state
, path
);
597 /* ------------ Pathd event handler ------------ */
599 void pcep_pcc_pathd_event_handler(struct ctrl_state
*ctrl_state
,
600 struct pcc_state
*pcc_state
,
601 enum pcep_pathd_event_type type
,
604 struct req_entry
*req
;
606 if (pcc_state
->status
!= PCEP_PCC_OPERATING
)
609 /* Skipping candidate path with endpoint that do not match the
610 * configured or deduced PCC IP version */
611 if (!filter_path(pcc_state
, path
)) {
612 PCEP_DEBUG("%s Skipping %s candidate path %s event",
614 ipaddr_type_name(&path
->nbkey
.endpoint
), path
->name
);
619 case PCEP_PATH_CREATED
:
620 if (has_pending_req_for(pcc_state
, path
)) {
622 "%s Candidate path %s created, computation request already sent",
623 pcc_state
->tag
, path
->name
);
626 PCEP_DEBUG("%s Candidate path %s created", pcc_state
->tag
,
628 if ((path
->first_hop
== NULL
)
629 && (path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
)) {
630 req
= push_new_req(pcc_state
, path
);
631 send_comp_request(ctrl_state
, pcc_state
, req
);
632 } else if (pcc_state
->caps
.is_stateful
)
633 send_report(pcc_state
, path
);
635 case PCEP_PATH_UPDATED
:
636 PCEP_DEBUG("%s Candidate path %s updated", pcc_state
->tag
,
638 if (pcc_state
->caps
.is_stateful
)
639 send_report(pcc_state
, path
);
641 case PCEP_PATH_REMOVED
:
642 PCEP_DEBUG("%s Candidate path %s removed", pcc_state
->tag
,
644 path
->was_removed
= true;
645 if (pcc_state
->caps
.is_stateful
)
646 send_report(pcc_state
, path
);
649 flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR
,
650 "Unexpected pathd event received by pcc %s: %u",
651 pcc_state
->tag
, type
);
657 /* ------------ PCEP event handler ------------ */
659 void pcep_pcc_pcep_event_handler(struct ctrl_state
*ctrl_state
,
660 struct pcc_state
*pcc_state
, pcep_event
*event
)
662 PCEP_DEBUG("%s Received PCEP event: %s", pcc_state
->tag
,
663 pcep_event_type_name(event
->event_type
));
664 switch (event
->event_type
) {
665 case PCC_CONNECTED_TO_PCE
:
666 assert(PCEP_PCC_CONNECTING
== pcc_state
->status
);
667 PCEP_DEBUG("%s Connection established", pcc_state
->tag
);
668 pcc_state
->status
= PCEP_PCC_SYNCHRONIZING
;
669 pcc_state
->retry_count
= 0;
670 pcc_state
->synchronized
= false;
671 PCEP_DEBUG("%s Starting PCE synchronization", pcc_state
->tag
);
672 cancel_session_timeout(ctrl_state
, pcc_state
);
673 pcep_pcc_calculate_best_pce(ctrl_state
->pcc
);
674 pcep_thread_start_sync(ctrl_state
, pcc_state
->id
);
676 case PCC_SENT_INVALID_OPEN
:
677 PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state
->tag
);
679 "%s Reconciling values: keep alive (%d) dead timer (%d) seconds ",
681 pcc_state
->sess
->pcc_config
682 .keep_alive_pce_negotiated_timer_seconds
,
683 pcc_state
->sess
->pcc_config
684 .dead_timer_pce_negotiated_seconds
);
685 pcc_state
->pce_opts
->config_opts
.keep_alive_seconds
=
686 pcc_state
->sess
->pcc_config
687 .keep_alive_pce_negotiated_timer_seconds
;
688 pcc_state
->pce_opts
->config_opts
.dead_timer_seconds
=
689 pcc_state
->sess
->pcc_config
690 .dead_timer_pce_negotiated_seconds
;
693 case PCC_RCVD_INVALID_OPEN
:
694 PCEP_DEBUG("%s Received invalid OPEN message", pcc_state
->tag
);
695 PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state
->tag
,
696 format_pcep_message(event
->message
));
698 case PCE_DEAD_TIMER_EXPIRED
:
699 case PCE_CLOSED_SOCKET
:
700 case PCE_SENT_PCEP_CLOSE
:
701 case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED
:
702 case PCC_PCEP_SESSION_CLOSED
:
703 case PCC_RCVD_MAX_INVALID_MSGS
:
704 case PCC_RCVD_MAX_UNKOWN_MSGS
:
705 pcep_pcc_disable(ctrl_state
, pcc_state
);
706 schedule_reconnect(ctrl_state
, pcc_state
);
707 schedule_session_timeout(ctrl_state
, pcc_state
);
709 case MESSAGE_RECEIVED
:
710 PCEP_DEBUG_PCEP("%s Received PCEP message: %s", pcc_state
->tag
,
711 format_pcep_message(event
->message
));
712 if (pcc_state
->status
== PCEP_PCC_CONNECTING
) {
713 if (event
->message
->msg_header
->type
== PCEP_TYPE_OPEN
)
714 handle_pcep_open(ctrl_state
, pcc_state
,
718 assert(pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
719 || pcc_state
->status
== PCEP_PCC_OPERATING
);
720 handle_pcep_message(ctrl_state
, pcc_state
, event
->message
);
723 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT
,
724 "Unexpected event from pceplib: %s",
725 format_pcep_event(event
));
731 /*------------------ Multi-PCE --------------------- */
733 /* Internal util function, returns true if sync is necessary, false otherwise */
734 bool update_best_pce(struct pcc_state
**pcc
, int best
)
736 PCEP_DEBUG(" recalculating pce precedence ");
738 struct pcc_state
*best_pcc_state
=
739 pcep_pcc_get_pcc_by_id(pcc
, best
);
740 if (best_pcc_state
->previous_best
!= best_pcc_state
->is_best
) {
741 PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)",
742 best_pcc_state
->tag
, best_pcc_state
->id
,
743 best_pcc_state
->previous_best
);
747 " %s No Resynch best (%i) previous best (%i)",
748 best_pcc_state
->tag
, best_pcc_state
->id
,
749 best_pcc_state
->previous_best
);
752 PCEP_DEBUG(" No best pce available, all pce seem disconnected");
758 int get_best_pce(struct pcc_state
**pcc
)
760 for (int i
= 0; i
< MAX_PCC
; i
++) {
761 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
762 if (pcc
[i
]->is_best
== true) {
770 int get_pce_count_connected(struct pcc_state
**pcc
)
773 for (int i
= 0; i
< MAX_PCC
; i
++) {
774 if (pcc
[i
] && pcc
[i
]->pce_opts
775 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
782 int get_previous_best_pce(struct pcc_state
**pcc
)
784 int previous_best_pce
= -1;
786 for (int i
= 0; i
< MAX_PCC
; i
++) {
787 if (pcc
[i
] && pcc
[i
]->pce_opts
&& pcc
[i
]->previous_best
== true
788 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
789 previous_best_pce
= i
;
793 return previous_best_pce
!= -1 ? pcc
[previous_best_pce
]->id
: 0;
796 /* Called by path_pcep_controller EV_REMOVE_PCC
797 * Event handler when a PCC is removed. */
798 int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state
*ctrl_state
,
799 struct pcc_state
**pcc
)
801 int new_best_pcc_id
= -1;
802 new_best_pcc_id
= pcep_pcc_calculate_best_pce(pcc
);
803 if (new_best_pcc_id
) {
804 if (update_best_pce(ctrl_state
->pcc
, new_best_pcc_id
) == true) {
805 pcep_thread_start_sync(ctrl_state
, new_best_pcc_id
);
812 /* Called by path_pcep_controller EV_SYNC_PATH
813 * Event handler when a path is sync'd. */
814 int pcep_pcc_multi_pce_sync_path(struct ctrl_state
*ctrl_state
, int pcc_id
,
815 struct pcc_state
**pcc
)
817 int previous_best_pcc_id
= -1;
819 if (pcc_id
== get_best_pce(pcc
)) {
820 previous_best_pcc_id
= get_previous_best_pce(pcc
);
821 if (previous_best_pcc_id
!= 0) {
822 /* while adding new pce, path has to resync to the
823 * previous best. pcep_thread_start_sync() will be
824 * called by the calling function */
825 if (update_best_pce(ctrl_state
->pcc
,
826 previous_best_pcc_id
)
828 cancel_comp_requests(
830 pcep_pcc_get_pcc_by_id(
831 pcc
, previous_best_pcc_id
));
832 pcep_thread_start_sync(ctrl_state
,
833 previous_best_pcc_id
);
841 /* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE
843 int pcep_pcc_timer_update_best_pce(struct ctrl_state
*ctrl_state
, int pcc_id
)
846 /* resync whatever was the new best */
847 int prev_best
= get_best_pce(ctrl_state
->pcc
);
848 int best_id
= pcep_pcc_calculate_best_pce(ctrl_state
->pcc
);
849 if (best_id
&& prev_best
!= best_id
) { // Avoid Multiple call
850 struct pcc_state
*pcc_state
=
851 pcep_pcc_get_pcc_by_id(ctrl_state
->pcc
, best_id
);
852 if (update_best_pce(ctrl_state
->pcc
, pcc_state
->id
) == true) {
853 pcep_thread_start_sync(ctrl_state
, pcc_state
->id
);
860 /* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
861 * Returns the best PCE id */
862 int pcep_pcc_calculate_best_pce(struct pcc_state
**pcc
)
864 int best_precedence
= 255; // DEFAULT_PCE_PRECEDENCE;
866 int one_connected_pce
= -1;
867 int previous_best_pce
= -1;
868 int step_0_best
= -1;
869 int step_0_previous
= -1;
873 for (int i
= 0; i
< MAX_PCC
; i
++) {
874 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
876 "multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ",
877 i
, pcc
[i
]->is_best
, pcc
[i
]->previous_best
);
880 if (pcc
[i
]->is_best
== true) {
883 if (pcc
[i
]->previous_best
== true) {
894 for (int i
= 0; i
< MAX_PCC
; i
++) {
895 if (pcc
[i
] && pcc
[i
]->pce_opts
896 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
897 one_connected_pce
= i
; // In case none better
898 if (pcc
[i
]->pce_opts
->precedence
<= best_precedence
) {
900 && pcc
[best_pce
]->pce_opts
->precedence
904 &pcc
[i
]->pce_opts
->addr
,
908 // collide of precedences so
912 if (!pcc
[i
]->previous_best
) {
924 "multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ",
925 step_0_best
, step_0_previous
, one_connected_pce
, best_pce
);
927 // Changed of state so ...
928 if (step_0_best
!= best_pce
) {
929 // Calculate previous
930 previous_best_pce
= step_0_best
;
932 if (step_0_best
!= -1) {
933 pcc
[step_0_best
]->is_best
= false;
935 if (step_0_previous
!= -1) {
936 pcc
[step_0_previous
]->previous_best
= false;
940 if (previous_best_pce
!= -1
941 && pcc
[previous_best_pce
]->status
942 == PCEP_PCC_DISCONNECTED
) {
943 pcc
[previous_best_pce
]->previous_best
= true;
944 zlog_debug("multi-pce: previous best pce (%i) ",
945 previous_best_pce
+ 1);
950 if (best_pce
!= -1) {
951 pcc
[best_pce
]->is_best
= true;
952 zlog_debug("multi-pce: best pce (%i) ", best_pce
+ 1);
954 if (one_connected_pce
!= -1) {
955 best_pce
= one_connected_pce
;
956 pcc
[one_connected_pce
]->is_best
= true;
958 "multi-pce: one connected best pce (default) (%i) ",
959 one_connected_pce
+ 1);
961 for (int i
= 0; i
< MAX_PCC
; i
++) {
962 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
964 pcc
[i
]->is_best
= true;
966 "(disconnected) best pce (default) (%i) ",
975 return ((best_pce
== -1) ? 0 : pcc
[best_pce
]->id
);
978 int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state
**pcc
,
979 struct pce_opts
*pce_opts
)
985 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
987 if ((ipaddr_cmp((const struct ipaddr
*)&pcc
[idx
]
989 (const struct ipaddr
*)&pce_opts
->addr
)
991 && pcc
[idx
]->pce_opts
->port
== pce_opts
->port
) {
992 zlog_debug("found pcc_id (%d) idx (%d)",
1001 int pcep_pcc_get_pcc_id_by_idx(struct pcc_state
**pcc
, int idx
)
1003 if (pcc
== NULL
|| idx
< 0) {
1007 return pcc
[idx
] ? pcc
[idx
]->id
: 0;
1010 struct pcc_state
*pcep_pcc_get_pcc_by_id(struct pcc_state
**pcc
, int id
)
1012 if (pcc
== NULL
|| id
< 0) {
1016 for (int i
= 0; i
< MAX_PCC
; i
++) {
1018 if (pcc
[i
]->id
== id
) {
1019 zlog_debug("found id (%d) pcc_idx (%d)",
1029 struct pcc_state
*pcep_pcc_get_pcc_by_name(struct pcc_state
**pcc
,
1030 const char *pce_name
)
1032 if (pcc
== NULL
|| pce_name
== NULL
) {
1036 for (int i
= 0; i
< MAX_PCC
; i
++) {
1037 if (pcc
[i
] == NULL
) {
1041 if (strcmp(pcc
[i
]->pce_opts
->pce_name
, pce_name
) == 0) {
1049 int pcep_pcc_get_pcc_idx_by_id(struct pcc_state
**pcc
, int id
)
1055 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1057 if (pcc
[idx
]->id
== id
) {
1058 zlog_debug("found pcc_id (%d) array_idx (%d)",
1068 int pcep_pcc_get_free_pcc_idx(struct pcc_state
**pcc
)
1070 assert(pcc
!= NULL
);
1072 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1073 if (pcc
[idx
] == NULL
) {
1074 zlog_debug("new pcc_idx (%d)", idx
);
1082 int pcep_pcc_get_pcc_id(struct pcc_state
*pcc
)
1084 return ((pcc
== NULL
) ? 0 : pcc
->id
);
1087 void pcep_pcc_copy_pcc_info(struct pcc_state
**pcc
,
1088 struct pcep_pcc_info
*pcc_info
)
1090 struct pcc_state
*pcc_state
=
1091 pcep_pcc_get_pcc_by_name(pcc
, pcc_info
->pce_name
);
1096 pcc_info
->ctrl_state
= NULL
;
1097 pcc_info
->msd
= pcc_state
->pcc_opts
->msd
;
1098 pcc_info
->pcc_port
= pcc_state
->pcc_opts
->port
;
1099 pcc_info
->next_plspid
= pcc_state
->next_plspid
;
1100 pcc_info
->next_reqid
= pcc_state
->next_reqid
;
1101 pcc_info
->status
= pcc_state
->status
;
1102 pcc_info
->pcc_id
= pcc_state
->id
;
1103 pcc_info
->is_best_multi_pce
= pcc_state
->is_best
;
1104 pcc_info
->previous_best
= pcc_state
->previous_best
;
1105 pcc_info
->precedence
=
1106 pcc_state
->pce_opts
? pcc_state
->pce_opts
->precedence
: 0;
1107 memcpy(&pcc_info
->pcc_addr
, &pcc_state
->pcc_addr_tr
,
1108 sizeof(struct ipaddr
));
1112 /*------------------ PCEP Message handlers --------------------- */
1114 void handle_pcep_open(struct ctrl_state
*ctrl_state
,
1115 struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1117 assert(msg
->msg_header
->type
== PCEP_TYPE_OPEN
);
1118 pcep_lib_parse_capabilities(msg
, &pcc_state
->caps
);
1119 PCEP_DEBUG("PCE capabilities: %s, %s%s",
1120 pcc_state
->caps
.is_stateful
? "stateful" : "stateless",
1121 pcc_state
->caps
.supported_ofs_are_known
1122 ? (pcc_state
->caps
.supported_ofs
== 0
1123 ? "no objective functions supported"
1124 : "supported objective functions are ")
1125 : "supported objective functions are unknown",
1126 format_objfun_set(pcc_state
->caps
.supported_ofs
));
1129 void handle_pcep_message(struct ctrl_state
*ctrl_state
,
1130 struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1132 if (pcc_state
->status
!= PCEP_PCC_OPERATING
)
1135 switch (msg
->msg_header
->type
) {
1136 case PCEP_TYPE_INITIATE
:
1137 handle_pcep_lsp_initiate(ctrl_state
, pcc_state
, msg
);
1139 case PCEP_TYPE_UPDATE
:
1140 handle_pcep_lsp_update(ctrl_state
, pcc_state
, msg
);
1142 case PCEP_TYPE_PCREP
:
1143 handle_pcep_comp_reply(ctrl_state
, pcc_state
, msg
);
1146 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE
,
1147 "Unexpected pcep message from pceplib: %s",
1148 format_pcep_message(msg
));
1153 void handle_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
1154 struct pcc_state
*pcc_state
,
1155 struct pcep_message
*msg
)
1157 char err
[MAX_ERROR_MSG_SIZE
] = "";
1159 path
= pcep_lib_parse_path(msg
);
1160 lookup_nbkey(pcc_state
, path
);
1161 /* TODO: Investigate if this is safe to do in the controller thread */
1162 path_pcep_config_lookup(path
);
1163 specialize_incoming_path(pcc_state
, path
);
1164 PCEP_DEBUG("%s Received LSP update", pcc_state
->tag
);
1165 PCEP_DEBUG_PATH("%s", format_path(path
));
1167 if (validate_incoming_path(pcc_state
, path
, err
, sizeof(err
)))
1168 pcep_thread_update_path(ctrl_state
, pcc_state
->id
, path
);
1170 /* FIXME: Monitor the amount of errors from the PCE and
1171 * possibly disconnect and blacklist */
1172 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1173 "Unsupported PCEP protocol feature: %s", err
);
1174 pcep_free_path(path
);
/* Handle a PCInitiate message. PCE-initiated LSPs are not supported yet,
 * so reply with an "invalid operation / LSP not PCE initiated" error. */
void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state,
			      struct pcep_message *msg)
{
	PCEP_DEBUG("%s Received LSP initiate, not supported yet",
		   pcc_state->tag);

	/* TODO when we support both PCC and PCE initiated sessions,
	 * we should first check the session type before
	 * rejecting this message. */
	send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
			PCEP_ERRV_LSP_NOT_PCE_INITIATED);
}
1192 void handle_pcep_comp_reply(struct ctrl_state
*ctrl_state
,
1193 struct pcc_state
*pcc_state
,
1194 struct pcep_message
*msg
)
1196 char err
[MAX_ERROR_MSG_SIZE
] = "";
1197 struct req_entry
*req
;
1200 path
= pcep_lib_parse_path(msg
);
1201 req
= pop_req(pcc_state
, path
->req_id
);
1203 /* TODO: check the rate of bad computation reply and close
1204 * the connection if more that a given rate.
1207 "%s Received computation reply for unknown request "
1209 pcc_state
->tag
, path
->req_id
);
1210 PCEP_DEBUG_PATH("%s", format_path(path
));
1211 send_pcep_error(pcc_state
, PCEP_ERRT_UNKNOWN_REQ_REF
,
1212 PCEP_ERRV_UNASSIGNED
);
1216 /* Cancel the computation request timeout */
1217 pcep_thread_cancel_timer(&req
->t_retry
);
1219 /* Transfer relevent metadata from the request to the response */
1220 path
->nbkey
= req
->path
->nbkey
;
1221 path
->plsp_id
= req
->path
->plsp_id
;
1222 path
->type
= req
->path
->type
;
1223 path
->name
= XSTRDUP(MTYPE_PCEP
, req
->path
->name
);
1224 specialize_incoming_path(pcc_state
, path
);
1226 PCEP_DEBUG("%s Received computation reply %d (no-path: %s)",
1227 pcc_state
->tag
, path
->req_id
,
1228 path
->no_path
? "true" : "false");
1229 PCEP_DEBUG_PATH("%s", format_path(path
));
1231 if (path
->no_path
) {
1232 PCEP_DEBUG("%s Computation for path %s did not find any result",
1233 pcc_state
->tag
, path
->name
);
1234 } else if (validate_incoming_path(pcc_state
, path
, err
, sizeof(err
))) {
1235 /* Updating a dynamic path will automatically delegate it */
1236 pcep_thread_update_path(ctrl_state
, pcc_state
->id
, path
);
1237 free_req_entry(req
);
1240 /* FIXME: Monitor the amount of errors from the PCE and
1241 * possibly disconnect and blacklist */
1242 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1243 "Unsupported PCEP protocol feature: %s", err
);
1246 pcep_free_path(path
);
1248 /* Delegate the path regardless of the outcome */
1249 /* TODO: For now we are using the path from the request, when
1250 * pathd API is thread safe, we could get a new path */
1251 if (pcc_state
->caps
.is_stateful
) {
1252 PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s",
1253 pcc_state
->tag
, path
->name
, pcc_state
->originator
);
1254 path
= pcep_copy_path(req
->path
);
1255 path
->is_delegated
= true;
1256 send_report(pcc_state
, path
);
1257 pcep_free_path(path
);
1260 free_req_entry(req
);
1264 /* ------------ Internal Functions ------------ */
/* Return a human-readable name for the address family of ADDR,
 * for use in log and debug messages. */
const char *ipaddr_type_name(struct ipaddr *addr)
{
	if (IS_IPADDR_V4(addr))
		return "IPv4"; /* NOTE(review): literal reconstructed — confirm */
	if (IS_IPADDR_V6(addr))
		return "IPv6"; /* NOTE(review): literal reconstructed — confirm */
	return "undefined"; /* NOTE(review): literal reconstructed — confirm */
}
1275 bool filter_path(struct pcc_state
*pcc_state
, struct path
*path
)
1277 return (IS_IPADDR_V4(&path
->nbkey
.endpoint
)
1278 && CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
))
1279 || (IS_IPADDR_V6(&path
->nbkey
.endpoint
)
1280 && CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
));
/* Ensure the PCC has source addresses for both families: any family not
 * explicitly configured is filled in from zebra's router-id, and the
 * corresponding F_PCC_STATE_HAS_IPV4/IPV6 flag is set on success. */
void select_pcc_addresses(struct pcc_state *pcc_state)
{
	/* If no IPv4 address was specified, try to get one from zebra */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
		if (get_ipv4_router_id(&pcc_state->pcc_addr_v4)) {
			SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
		}
	}

	/* If no IPv6 address was specified, try to get one from zebra */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
		if (get_ipv6_router_id(&pcc_state->pcc_addr_v6)) {
			SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
		}
	}
}
/* Pick the transport source address (pcc_addr_tr) matching the address
 * family of the configured PCE. Left as IPADDR_NONE when no usable local
 * address of that family is known. */
void select_transport_address(struct pcc_state *pcc_state)
{
	struct ipaddr *taddr = &pcc_state->pcc_addr_tr;

	select_pcc_addresses(pcc_state);

	taddr->ipa_type = IPADDR_NONE;

	/* Select a transport source address in function of the configured PCE
	 * address family */
	if (IS_IPADDR_V4(&pcc_state->pce_opts->addr)) {
		if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
			taddr->ipa_type = IPADDR_V4;
			taddr->ipaddr_v4 = pcc_state->pcc_addr_v4;
		}
	} else {
		if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
			taddr->ipa_type = IPADDR_V6;
			taddr->ipaddr_v6 = pcc_state->pcc_addr_v6;
		}
	}
}
/* Rebuild the logging tag for this PCC session. With a configured PCE the
 * tag is "<pce-addr>:<port> (<pcc-id>)"; without one it is "(<pcc-id>)". */
void update_tag(struct pcc_state *pcc_state)
{
	if (pcc_state->pce_opts != NULL) {
		assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
		if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
			snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
				   /* NOTE(review): format reconstructed — confirm */
				   "%pI6:%i (%u)",
				   &pcc_state->pce_opts->addr.ipaddr_v6,
				   pcc_state->pce_opts->port, pcc_state->id);
		} else {
			snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
				   /* NOTE(review): format reconstructed — confirm */
				   "%pI4:%i (%u)",
				   &pcc_state->pce_opts->addr.ipaddr_v4,
				   pcc_state->pce_opts->port, pcc_state->id);
		}
	} else {
		snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), "(%u)",
			   pcc_state->id);
	}
}
/* Rebuild the "originator" string identifying the PCE this session talks
 * to ("<addr>:<port>"). Any previous string is freed first; with no PCE
 * configured the originator stays NULL. */
void update_originator(struct pcc_state *pcc_state)
{
	char *originator;

	if (pcc_state->originator != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->originator);
		pcc_state->originator = NULL;
	}
	if (pcc_state->pce_opts == NULL)
		return;
	/* 52 bytes: enough for a full IPv6 textual address plus ":port" */
	originator = XCALLOC(MTYPE_PCEP, 52);
	assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
	if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
		snprintfrr(originator, 52, "%pI6:%i",
			   &pcc_state->pce_opts->addr.ipaddr_v6,
			   pcc_state->pce_opts->port);
	} else {
		snprintfrr(originator, 52, "%pI4:%i",
			   &pcc_state->pce_opts->addr.ipaddr_v4,
			   pcc_state->pce_opts->port);
	}
	pcc_state->originator = originator;
}
/* Schedule a reconnection attempt to the PCE with an attempt counter used
 * for backoff. On the first retry only, also schedule a re-evaluation of
 * the best PCE after the delegation timeout expires. */
void schedule_reconnect(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	pcc_state->retry_count++;
	pcep_thread_schedule_reconnect(ctrl_state, pcc_state->id,
				       pcc_state->retry_count,
				       &pcc_state->t_reconnect);
	if (pcc_state->retry_count == 1) {
		pcep_thread_schedule_sync_best_pce(
			ctrl_state, pcc_state->id,
			pcc_state->pce_opts->config_opts
				.delegation_timeout_seconds,
			&pcc_state->t_update_best);
	}
}
/* Arm the session timeout timer for this PCC session. Skipped entirely
 * when at least one other PCE is connected (multi-PCE mode). */
void schedule_session_timeout(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state)
{
	/* No need to schedule timeout if multiple PCEs are connected */
	if (get_pce_count_connected(ctrl_state->pcc)) {
		PCEP_DEBUG_PCEP(
			"schedule_session_timeout not setting timer for multi-pce mode");
		return;
	}

	pcep_thread_schedule_session_timeout(
		ctrl_state, pcep_pcc_get_pcc_id(pcc_state),
		pcc_state->pce_opts->config_opts
			.session_timeout_inteval_seconds,
		&pcc_state->t_session_timeout);
}
/* Cancel a pending session timeout timer, if one is armed. */
void cancel_session_timeout(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state)
{
	/* Nothing to do when no timer was ever scheduled */
	if (pcc_state->t_session_timeout == NULL) {
		PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL");
		return;
	}

	PCEP_DEBUG_PCEP("Cancel session_timeout timer");
	pcep_thread_cancel_timer(&pcc_state->t_session_timeout);
	pcc_state->t_session_timeout = NULL;
}
/* Send MSG on the PCC's pceplib session, if one is established; silently
 * dropped otherwise. Ownership of MSG passes to pceplib (free_after_send
 * is true). */
void send_pcep_message(struct pcc_state *pcc_state, struct pcep_message *msg)
{
	if (pcc_state->sess != NULL) {
		PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state->tag,
				format_pcep_message(msg));
		send_message(pcc_state->sess, msg, true);
	}
}
/* Format and send a PCErr message with the given error type and value
 * to the PCE this session is connected to. */
void send_pcep_error(struct pcc_state *pcc_state,
		     enum pcep_error_type error_type,
		     enum pcep_error_value error_value)
{
	struct pcep_message *msg;

	PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)",
		   pcc_state->tag, pcep_error_type_name(error_type), error_type,
		   pcep_error_value_name(error_type, error_value), error_value);
	msg = pcep_lib_format_error(error_type, error_value);
	send_pcep_message(pcc_state, msg);
}
/* Send a PCRpt (LSP state report) for PATH to the PCE, after filling in
 * the PCC-side fields (PLSP-ID, sender address, delegation flags). */
void send_report(struct pcc_state *pcc_state, struct path *path)
{
	struct pcep_message *report;

	/* NOTE(review): a statement from original line 1440 is not visible
	 * here — presumably `path->req_id = 0;` (reports carry no request
	 * id); confirm against upstream. */
	path->req_id = 0;
	specialize_outgoing_path(pcc_state, path);
	PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state->tag, path->name,
			format_path(path));
	report = pcep_lib_format_report(&pcc_state->caps, path);
	send_pcep_message(pcc_state, report);
}
/* Updates the path for the PCE, updating the delegation and creation flags */
void specialize_outgoing_path(struct pcc_state *pcc_state, struct path *path)
{
	bool is_delegated = false;
	bool was_created = false;

	/* Assign/recover the PLSP-ID for this path's northbound key */
	lookup_plspid(pcc_state, path);

	set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
	path->sender = pcc_state->pcc_addr_tr;

	/* TODO: When the pathd API have a way to mark a path as
	 * delegated, use it instead of considering all dynamic path
	 * delegated. We need to disable the originator check for now,
	 * because path could be delegated without having any originator yet */
	// if ((path->originator == NULL)
	//     || (strcmp(path->originator, pcc_state->originator) == 0)) {
	// 	is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
	// 			&& (path->first_hop != NULL);
	// 	/* it seems the PCE consider updating an LSP a creation ?!?
	// 	at least Cisco does... */
	// 	was_created = path->update_origin == SRTE_ORIGIN_PCEP;
	// }
	is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC);
	was_created = path->update_origin == SRTE_ORIGIN_PCEP;

	path->pcc_id = pcc_state->id;
	/* Only delegate/activate through the currently-best PCE */
	path->go_active = is_delegated && pcc_state->is_best;
	path->is_delegated = is_delegated && pcc_state->is_best;
	path->was_created = was_created;
}
/* Updates the path for the PCC: fills in the PCC-side fields of a path
 * received from the PCE (local address, sender, owning PCC id, PCEP
 * origin and originator string). */
void specialize_incoming_path(struct pcc_state *pcc_state, struct path *path)
{
	set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
	path->sender = pcc_state->pce_opts->addr;
	path->pcc_id = pcc_state->id;
	path->update_origin = SRTE_ORIGIN_PCEP;
	/* Duplicated so the path owns its originator string */
	path->originator = XSTRDUP(MTYPE_PCEP, pcc_state->originator);
}
/* Ensure the path can be handled by the PCC and if not, sends an error.
 * Returns true when every hop is supported; otherwise writes a message
 * into ERRBUFF, sends the corresponding PCErr, and returns false. */
bool validate_incoming_path(struct pcc_state *pcc_state, struct path *path,
			    char *errbuff, size_t buffsize)
{
	struct path_hop *hop;
	enum pcep_error_type err_type = 0;
	enum pcep_error_value err_value = PCEP_ERRV_UNASSIGNED;

	for (hop = path->first_hop; hop != NULL; hop = hop->next) {
		/* Hops without SID are not supported */
		if (!hop->has_sid) {
			snprintfrr(errbuff, buffsize, "SR segment without SID");
			err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
			err_value = PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING;
			break;
		}
		/* Hops with non-MPLS SID are not supported */
		if (!hop->is_mpls) {
			snprintfrr(errbuff, buffsize,
				   "SR segment with non-MPLS SID");
			err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
			err_value = PCEP_ERRV_UNSUPPORTED_NAI;
			break;
		}
	}

	if (err_type != 0) {
		send_pcep_error(pcc_state, err_type, err_value);
		return false;
	}

	return true;
}
/* Send a PCReq (computation request) for REQ to the PCE, and arm the
 * retry timeout. The request must already be registered in the PCC's
 * request tree and request-id map. Skipped (without error) when a retry
 * is already pending or this PCE is not the current best one. */
void send_comp_request(struct ctrl_state *ctrl_state,
		       struct pcc_state *pcc_state, struct req_entry *req)
{
	assert(req != NULL);

	/* NOTE(review): a guard from original lines ~1529-1530 is not
	 * visible here — presumably `if (req->t_retry) return;` (a retry
	 * timer is already armed); confirm against upstream. */
	if (req->t_retry)
		return;

	assert(req->path != NULL);
	assert(req->path->req_id > 0);
	assert(RB_FIND(req_entry_head, &pcc_state->requests, req) == req);
	assert(lookup_reqid(pcc_state, req->path) == req->path->req_id);

	int timeout;
	char buff[40];
	struct pcep_message *msg;

	if (!pcc_state->is_best) {
		return;
	}
	/* TODO: Add a timer to retry the computation request ? */

	specialize_outgoing_path(pcc_state, req->path);

	PCEP_DEBUG(
		"%s Sending computation request %d for path %s to %s (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		ipaddr2str(&req->path->nbkey.endpoint, buff, sizeof(buff)),
		req->retry_count);
	PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state->tag,
			req->path->name, format_path(req->path));

	msg = pcep_lib_format_request(&pcc_state->caps, req->path);
	send_pcep_message(pcc_state, msg);
	req->was_sent = true;

	/* TODO: Enable this back when the pcep config changes are merged back
	 */
	// timeout = pcc_state->pce_opts->config_opts.pcep_request_time_seconds;
	/* NOTE(review): hard-coded timeout from original line ~1563 is not
	 * visible — presumably `timeout = 5;`; confirm against upstream. */
	timeout = 5;
	pcep_thread_schedule_timeout(ctrl_state, pcc_state->id,
				     TO_COMPUTATION_REQUEST, timeout,
				     (void *)req, &req->t_retry);
}
/* Cancel every pending computation request for this PCC: notify/cancel
 * each one, then remove it from the request tree and request-id map and
 * free it. Uses the _SAFE iterator because entries are removed while
 * walking the tree. */
void cancel_comp_requests(struct ctrl_state *ctrl_state,
			  struct pcc_state *pcc_state)
{
	struct req_entry *req, *safe_req;

	RB_FOREACH_SAFE (req, req_entry_head, &pcc_state->requests, safe_req) {
		cancel_comp_request(ctrl_state, pcc_state, req);
		RB_REMOVE(req_entry_head, &pcc_state->requests, req);
		remove_reqid_mapping(pcc_state, req->path);
		free_req_entry(req);
	}
}
/* Cancel a single in-flight computation request: stop its retry timer if
 * it was sent and tell the PCE the request is cancelled. Does NOT remove
 * the entry from the PCC's bookkeeping — the caller does that. */
void cancel_comp_request(struct ctrl_state *ctrl_state,
			 struct pcc_state *pcc_state, struct req_entry *req)
{
	char buff[40];
	struct pcep_message *msg;

	if (req->was_sent) {
		/* TODO: Send a computation request cancelation
		 * notification to the PCE */
		pcep_thread_cancel_timer(&req->t_retry);
	}

	PCEP_DEBUG(
		"%s Canceling computation request %d for path %s to %s (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		ipaddr2str(&req->path->nbkey.endpoint, buff, sizeof(buff)),
		req->retry_count);
	PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s",
			pcc_state->tag, req->path->name,
			format_path(req->path));

	msg = pcep_lib_format_request_cancelled(req->path->req_id);
	send_pcep_message(pcc_state, msg);
}
/* Set ADDR to the PCC's local address matching the family of the path
 * endpoint in NBKEY. Asserts that an address of that family is known
 * (see select_pcc_addresses); an endpoint with no family yields
 * IPADDR_NONE. */
void set_pcc_address(struct pcc_state *pcc_state, struct lsp_nb_key *nbkey,
		     struct ipaddr *addr)
{
	select_pcc_addresses(pcc_state);
	if (IS_IPADDR_V6(&nbkey->endpoint)) {
		assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
		addr->ipa_type = IPADDR_V6;
		addr->ipaddr_v6 = pcc_state->pcc_addr_v6;
	} else if (IS_IPADDR_V4(&nbkey->endpoint)) {
		assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4));
		addr->ipa_type = IPADDR_V4;
		addr->ipaddr_v4 = pcc_state->pcc_addr_v4;
	} else {
		addr->ipa_type = IPADDR_NONE;
	}
}
1625 /* ------------ Data Structure Helper Functions ------------ */
/* Assign PATH's PLSP-ID from the PCC's (nbkey -> plspid) map, allocating
 * a fresh id (and the reverse plspid -> nbkey mapping) on first use of
 * this northbound key. Paths with color 0 are left untouched. */
void lookup_plspid(struct pcc_state *pcc_state, struct path *path)
{
	struct plspid_map_data key, *plspid_mapping;
	struct nbkey_map_data *nbkey_mapping;

	if (path->nbkey.color != 0) {
		key.nbkey = path->nbkey;
		plspid_mapping = plspid_map_find(&pcc_state->plspid_map, &key);
		if (plspid_mapping == NULL) {
			plspid_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*plspid_mapping));
			plspid_mapping->nbkey = key.nbkey;
			plspid_mapping->plspid = pcc_state->next_plspid;
			plspid_map_add(&pcc_state->plspid_map, plspid_mapping);
			/* Also record the reverse mapping used by
			 * lookup_nbkey() */
			nbkey_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*nbkey_mapping));
			nbkey_mapping->nbkey = key.nbkey;
			nbkey_mapping->plspid = pcc_state->next_plspid;
			nbkey_map_add(&pcc_state->nbkey_map, nbkey_mapping);
			pcc_state->next_plspid++;
			/* PLSP-ID is a 20-bit field (max 1048576) */
			// FIXME: Send some error to the PCE instead of crashing
			assert(pcc_state->next_plspid <= 1048576);
		}
		path->plsp_id = plspid_mapping->plspid;
	}
}
1654 void lookup_nbkey(struct pcc_state
*pcc_state
, struct path
*path
)
1656 struct nbkey_map_data key
, *mapping
;
1657 // TODO: Should give an error to the PCE instead of crashing
1658 assert(path
->plsp_id
!= 0);
1659 key
.plspid
= path
->plsp_id
;
1660 mapping
= nbkey_map_find(&pcc_state
->nbkey_map
, &key
);
1661 assert(mapping
!= NULL
);
1662 path
->nbkey
= mapping
->nbkey
;
/* Free a request entry and the path it owns. The path must be freed
 * first, before the entry holding its pointer is released. */
void free_req_entry(struct req_entry *req)
{
	pcep_free_path(req->path);
	XFREE(MTYPE_PCEP, req);
}
1671 struct req_entry
*push_new_req(struct pcc_state
*pcc_state
, struct path
*path
)
1673 struct req_entry
*req
;
1675 req
= XCALLOC(MTYPE_PCEP
, sizeof(*req
));
1676 req
->retry_count
= 0;
1677 req
->path
= pcep_copy_path(path
);
1678 repush_req(pcc_state
, req
);
1683 void repush_req(struct pcc_state
*pcc_state
, struct req_entry
*req
)
1685 uint32_t reqid
= pcc_state
->next_reqid
;
1688 req
->was_sent
= false;
1689 req
->path
->req_id
= reqid
;
1690 res
= RB_INSERT(req_entry_head
, &pcc_state
->requests
, req
);
1691 assert(res
== NULL
);
1692 assert(add_reqid_mapping(pcc_state
, req
->path
) == true);
1694 pcc_state
->next_reqid
+= 1;
1695 /* Wrapping is allowed, but 0 is not a valid id */
1696 if (pcc_state
->next_reqid
== 0)
1697 pcc_state
->next_reqid
= 1;
1700 struct req_entry
*pop_req(struct pcc_state
*pcc_state
, uint32_t reqid
)
1702 struct path path
= {.req_id
= reqid
};
1703 struct req_entry key
= {.path
= &path
};
1704 struct req_entry
*req
;
1706 req
= RB_FIND(req_entry_head
, &pcc_state
->requests
, &key
);
1709 RB_REMOVE(req_entry_head
, &pcc_state
->requests
, req
);
1710 remove_reqid_mapping(pcc_state
, req
->path
);
/* Record the (nbkey -> request id) mapping for PATH. Returns false (and
 * frees the new entry) when a mapping for that key already exists. */
bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
{
	struct req_map_data *mapping;

	mapping = XCALLOC(MTYPE_PCEP, sizeof(*mapping));
	mapping->nbkey = path->nbkey;
	mapping->reqid = path->req_id;
	/* req_map_add returns the pre-existing entry on collision */
	if (req_map_add(&pcc_state->req_map, mapping) != NULL) {
		XFREE(MTYPE_PCEP, mapping);
		return false;
	}
	return true;
}
1728 void remove_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
)
1730 struct req_map_data key
, *mapping
;
1731 key
.nbkey
= path
->nbkey
;
1732 mapping
= req_map_find(&pcc_state
->req_map
, &key
);
1733 if (mapping
!= NULL
) {
1734 req_map_del(&pcc_state
->req_map
, mapping
);
1735 XFREE(MTYPE_PCEP
, mapping
);
/* Return the pending request id mapped to PATH's northbound key, or 0
 * when no request is pending (0 is never a valid request id). */
uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path)
{
	struct req_map_data key, *mapping;

	key.nbkey = path->nbkey;
	mapping = req_map_find(&pcc_state->req_map, &key);
	if (mapping != NULL)
		return mapping->reqid;
	return 0;
}
1749 bool has_pending_req_for(struct pcc_state
*pcc_state
, struct path
*path
)
1751 return lookup_reqid(pcc_state
, path
) != 0;
1755 /* ------------ Data Structure Callbacks ------------ */
1757 #define CMP_RETURN(A, B) \
1759 return (A < B) ? -1 : 1
/* Hash a northbound key (color, preference, endpoint) for the RB-tree
 * maps, mixing the endpoint address bytes into a jhash seed built from
 * color and preference. */
static uint32_t hash_nbkey(const struct lsp_nb_key *nbkey)
{
	uint32_t hash;

	hash = jhash_2words(nbkey->color, nbkey->preference, 0x55aa5a5a);
	/* NOTE(review): case labels reconstructed from the v4/v6 bodies —
	 * confirm against upstream. */
	switch (nbkey->endpoint.ipa_type) {
	case IPADDR_V4:
		return jhash(&nbkey->endpoint.ipaddr_v4,
			     sizeof(nbkey->endpoint.ipaddr_v4), hash);
	case IPADDR_V6:
		return jhash(&nbkey->endpoint.ipaddr_v6,
			     sizeof(nbkey->endpoint.ipaddr_v6), hash);
	default:
		return hash;
	}
}
/* Total order on northbound keys: by color, then endpoint address, then
 * preference. Returns <0, 0 or >0 like strcmp. */
static int cmp_nbkey(const struct lsp_nb_key *a, const struct lsp_nb_key *b)
{
	CMP_RETURN(a->color, b->color);
	int cmp = ipaddr_cmp(&a->endpoint, &b->endpoint);
	if (cmp != 0)
		return cmp;
	CMP_RETURN(a->preference, b->preference);
	return 0;
}
/* RB-tree comparator for the (nbkey -> plspid) map: order by nbkey. */
int plspid_map_cmp(const struct plspid_map_data *a,
		   const struct plspid_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
/* Hash callback for the (nbkey -> plspid) map: hash the nbkey. */
uint32_t plspid_map_hash(const struct plspid_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}
/* RB-tree comparator for the (plspid -> nbkey) reverse map: order by
 * PLSP-ID. */
int nbkey_map_cmp(const struct nbkey_map_data *a,
		  const struct nbkey_map_data *b)
{
	CMP_RETURN(a->plspid, b->plspid);
	return 0;
}
1805 uint32_t nbkey_map_hash(const struct nbkey_map_data
*e
)
/* RB-tree comparator for the (nbkey -> reqid) map: order by nbkey. */
int req_map_cmp(const struct req_map_data *a, const struct req_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
/* Hash callback for the (nbkey -> reqid) map: hash the nbkey. */
uint32_t req_map_hash(const struct req_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}